Dataset columns:
  code: string (length 81 to 54k)
  code_codestyle: int64 (0 to 721)
  style_context: string (length 91 to 41.9k)
  style_context_codestyle: int64 (0 to 699)
  label: int64 (0 to 1)
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: List[str] , __A: Tuple , __A: List[str]=13 , __A: Union[str, Any]=7 , __A: Union[str, Any]=6 , __A: Dict=17 , __A: List[str]=23 , __A: Any=11 , __A: str=True , ) -> Any: _A = parent _A = batch_size _A = seq_length _A = act_dim _A = state_dim _A = hidden_size _A = max_length _A = is_training def __A ( self: str ) -> Optional[Any]: _A = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) _A = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) _A = floats_tensor((self.batch_size, self.seq_length, 1) ) _A = floats_tensor((self.batch_size, self.seq_length, 1) ) _A = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 ) _A = random_attention_mask((self.batch_size, self.seq_length) ) _A = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __A ( self: Union[str, Any] ) -> Optional[int]: return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __A ( self: List[Any] , __A: Tuple , __A: int , __A: Any , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[str] , __A: Any , ) -> Any: _A = DecisionTransformerModel(config=__A ) model.to(__A ) model.eval() _A = model(__A , __A , __A , __A , __A , __A ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __A ( self: str ) -> Tuple: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = { '''states''': states, '''actions''': actions, '''rewards''': rewards, '''returns_to_go''': returns_to_go, '''timesteps''': timesteps, '''attention_mask''': attention_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (DecisionTransformerModel,) if is_torch_available() else () A_ = () A_ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids A_ = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features A_ = False A_ = False A_ = False A_ = False A_ = False A_ = False A_ = False A_ = False A_ = False def __A ( self: Any ) -> Optional[int]: _A = DecisionTransformerModelTester(self ) _A = ConfigTester(self 
, config_class=__A , hidden_size=37 ) def __A ( self: str ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) @slow def __A ( self: Optional[int] ) -> str: for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DecisionTransformerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: List[str] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = [ '''states''', '''actions''', '''rewards''', '''returns_to_go''', '''timesteps''', '''attention_mask''', ] self.assertListEqual(arg_names[: len(__A )] , __A ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def __A ( self: str ) -> Any: _A = 2 # number of steps of autoregressive prediction we will perform _A = 10 # defined by the RL environment, may be normalized _A = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' ) _A = model.to(__A ) _A = model.config torch.manual_seed(0 ) _A = torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ) # env.reset() _A = torch.tensor( [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=__A ) _A = torch.tensor(__A , device=__A , dtype=torch.floataa ).reshape(1 , 1 , 1 ) _A = state _A = torch.zeros(1 , 0 , config.act_dim , device=__A , dtype=torch.floataa ) _A = torch.zeros(1 , 0 , device=__A , dtype=torch.floataa ) _A = torch.tensor(0 , device=__A , dtype=torch.long ).reshape(1 , 1 ) for step in range(__A ): _A = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__A )] , dim=1 ) _A = torch.cat([rewards, torch.zeros(1 , 1 , device=__A )] , dim=1 ) _A = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): _A ,_A ,_A = model( states=__A , actions=__A , rewards=__A , returns_to_go=__A , timesteps=__A , attention_mask=__A , return_dict=__A , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) _A ,_A ,_A ,_A = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ), 1.0, False, {}, ) _A = action_pred[0, -1] _A = torch.cat([states, state] , dim=1 ) _A = returns_to_go[0, -1] - reward _A = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) _A = torch.cat( [timesteps, torch.ones((1, 1) , device=__A , dtype=torch.long ) * (step + 1)] , dim=1 )
code_codestyle: 62
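The test sample above drives DecisionTransformerModel with six inputs and checks that the hidden state has sequence length seq_length * 3, one token each for return, state, and action per step. A minimal sketch of that forward call outside the test harness (the config defaults noted in the comment, such as state_dim=17 and act_dim=4, are assumptions, not taken from the sample):

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig()  # assumed defaults: state_dim=17, act_dim=4, hidden_size=128
model = DecisionTransformerModel(config).eval()

batch, seq = 2, 7
states = torch.randn(batch, seq, config.state_dim)
actions = torch.randn(batch, seq, config.act_dim)
rewards = torch.randn(batch, seq, 1)
returns_to_go = torch.randn(batch, seq, 1)
timesteps = torch.arange(seq).unsqueeze(0).repeat(batch, 1)
attention_mask = torch.ones(batch, seq, dtype=torch.long)

with torch.no_grad():
    out = model(states=states, actions=actions, rewards=rewards,
                returns_to_go=returns_to_go, timesteps=timesteps,
                attention_mask=attention_mask)

# Three interleaved modalities (returns, states, actions) per step.
assert out.last_hidden_state.shape == (batch, seq * 3, config.hidden_size)
```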
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: _A = mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) else: _A = max( mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) , mf_knapsack(i - 1 , _lowercase , _lowercase , j - wt[i - 1] ) + val[i - 1] , ) _A = val return f[i][j] def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: _A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: _A = dp[i - 1][w_] return dp[n][w_], dp def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not (isinstance(_lowercase , (list, tuple) ) and isinstance(_lowercase , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) _A = len(_lowercase ) if num_items != len(_lowercase ): _A = ( '''The number of weights must be the same as the number of values.\n''' f"""But got {num_items} weights and {len(_lowercase )} values""" ) raise ValueError(_lowercase ) for i in range(_lowercase ): if not isinstance(wt[i] , _lowercase ): _A = ( '''All weights must be integers but got weight of ''' f"""type {type(wt[i] )} at index {i}""" ) raise TypeError(_lowercase ) _A ,_A = knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) _A = set() _construct_solution(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) return optimal_val, example_optional_set def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_lowercase , _lowercase , i - 1 , _lowercase , _lowercase ) else: optimal_set.add(_lowercase ) _construct_solution(_lowercase , _lowercase , i - 1 , j - wt[i - 1] , _lowercase ) if __name__ == "__main__": __A = [3, 2, 4, 4] __A = [4, 3, 2, 3] __A = 4 __A = 6 __A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __A , __A = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __A , __A = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
style_context_codestyle: 62
label: 1
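Under the obfuscation, the knapsack sample above is the classic bottom-up 0/1 knapsack (dp[i][w] = best value using the first i items at capacity w) plus a backtracking pass that reconstructs one optimal item set. A readable sketch of the core recurrence; the function name is hypothetical, and the weight/value assignment is inferred from the sample's asserted answer of 8 with subset {3, 4}:

```python
def knapsack_01(capacity: int, weights: list[int], values: list[int]) -> int:
    """Bottom-up 0/1 knapsack table; dp[i][w] = best value from the first i items."""
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            if weights[i - 1] <= w:  # item i fits: take the better of skip vs. take
                dp[i][w] = max(dp[i - 1][w],
                               dp[i - 1][w - weights[i - 1]] + values[i - 1])
            else:
                dp[i][w] = dp[i - 1][w]
    return dp[n][capacity]

# Same data as the sample's __main__ block: items 3 and 4 (weights 2 and 3) give value 8.
assert knapsack_01(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8
```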
class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: List[Any] ) -> Union[str, Any]: _A = {} def __A ( self: str ) -> None: print(self.vertex ) for i in self.vertex: print(__A , ''' -> ''' , ''' -> '''.join([str(__A ) for j in self.vertex[i]] ) ) def __A ( self: Union[str, Any] , __A: int , __A: int ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__A ) else: # else make a new vertex _A = [to_vertex] def __A ( self: Any ) -> None: # visited array for storing already visited nodes _A = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__A , __A ) def __A ( self: List[Any] , __A: int , __A: list ) -> None: # mark start vertex as visited _A = True print(__A , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__A , __A ) if __name__ == "__main__": __A = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('DFS:') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
code_codestyle: 62
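The Graph class above keeps a dict-based adjacency list and prints a recursive depth-first traversal across all components. An equivalent iterative sketch (hypothetical function name; vertices assumed hashable, as in the example's 0..3 labels):

```python
def dfs(graph: dict[int, list[int]]) -> list[int]:
    """Iterative depth-first traversal covering every component."""
    order, visited = [], set()
    for start in graph:
        if start in visited:
            continue
        stack = [start]
        while stack:
            v = stack.pop()
            if v in visited:
                continue
            visited.add(v)
            order.append(v)
            # Push neighbours in reverse so they pop in adjacency order.
            stack.extend(reversed(graph[v]))
    return order

g = {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
print(dfs(g))  # [0, 1, 2, 3], matching the sample's printed DFS order
```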
def __A ( _lowercase = 1_00_00_00 ): '''simple docstring''' _A = 1 _A = 1 _A = {1: 1} for inputa in range(2 , _lowercase ): _A = 0 _A = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: _A = (3 * number) + 1 counter += 1 if inputa not in counters: _A = counter if counter > pre_counter: _A = inputa _A = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
style_context_codestyle: 62
label: 1
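The solution above is Project Euler problem 14: among starting values below one million, find the one with the longest Collatz sequence, memoizing chain lengths in a dict as it goes. A compact sketch of the same idea using lru_cache (hypothetical names):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_length(n: int) -> int:
    """Length of the Collatz sequence starting at n, counting n and the final 1."""
    if n == 1:
        return 1
    return 1 + collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)

def longest_chain_start(limit: int = 1_000_000) -> int:
    return max(range(1, limit), key=collatz_length)

# print(longest_chain_start())  # 837799, whose chain has 525 terms
```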
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __A = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __A = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __A = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 62
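The module above follows transformers' lazy-import layout: _import_structure lists what each submodule exports, the TYPE_CHECKING branch keeps static analysis happy, and _LazyModule defers the real imports until first attribute access. A generic, stripped-down sketch of that deferral pattern (hypothetical class; the real _LazyModule also handles optional backends and module specs, and relative imports assume the module lives inside a package):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per name
        return value
```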
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 62
label: 1
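The function above is full text justification: pack words greedily up to max_width, hand out leftover spaces round-robin starting from the leftmost gap, and left-justify the final line with trailing spaces. The round-robin step in isolation, as a worked example:

```python
# Round-robin space distribution for one justified line of width 16.
line, max_width = ["Round", "robin", "pad"], 16
width = sum(len(w) for w in line)          # 13 characters of words
spaces = max_width - width                 # 3 spaces to hand out
gaps = len(line) - 1                       # 2 gaps between words
per_gap = [spaces // gaps] * gaps          # [1, 1]
for i in range(spaces % gaps):             # 1 leftover goes to the leftmost gap
    per_gap[i] += 1                        # -> [2, 1]
out = "".join(w + " " * g for w, g in zip(line, per_gap)) + line[-1]
assert out == "Round  robin pad" and len(out) == 16
```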
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: str ) -> Any: _A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__A , '''depth_multiplier''' ) ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Any , __A: Tuple , __A: Optional[int]=13 , __A: Optional[Any]=3 , __A: Optional[Any]=32 , __A: Optional[Any]=0.25 , __A: Optional[Any]=8 , __A: Dict=8 , __A: str=6 , __A: Union[str, Any]=32 , __A: Any=True , __A: int=True , __A: List[Any]=True , __A: Union[str, Any]="relu6" , __A: Dict=12_80 , __A: Dict=0.1 , __A: List[str]=0.02 , __A: List[str]=True , __A: List[Any]=True , __A: Optional[Any]=10 , __A: Union[str, Any]=None , ) -> Any: _A = parent _A = batch_size _A = num_channels _A = image_size _A = depth_multiplier _A = depth_divisible_by _A = min_depth _A = expand_ratio _A = tf_padding _A = output_stride _A = first_layer_is_expansion _A = finegrained_output _A = hidden_act _A = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) _A = classifier_dropout_prob _A = use_labels _A = is_training _A = num_labels _A = initializer_range _A = scope def __A ( self: str ) -> str: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.num_labels ) _A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels, pixel_labels def __A ( self: str ) -> Optional[Any]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __A ( self: List[str] , __A: Union[str, Any] , __A: List[Any] , __A: int , __A: Optional[int] ) -> Optional[Any]: _A = MobileNetVaModel(config=__A ) model.to(__A ) model.eval() _A = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Optional[int] , __A: Union[str, Any] , __A: 
List[str] ) -> Any: _A = self.num_labels _A = MobileNetVaForImageClassification(__A ) model.to(__A ) model.eval() _A = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self: Optional[int] , __A: Tuple , __A: str , __A: Dict , __A: List[Any] ) -> Optional[Any]: _A = self.num_labels _A = MobileNetVaForSemanticSegmentation(__A ) model.to(__A ) model.eval() _A = model(__A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _A = model(__A , labels=__A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self: str ) -> Optional[Any]: _A = self.prepare_config_and_inputs() _A ,_A ,_A ,_A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) A_ = ( { "feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification, "image-segmentation": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: Any ) -> Union[str, Any]: _A = MobileNetVaModelTester(self ) _A = MobileNetVaConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: Dict ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def __A ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def __A ( self: List[str] ) -> int: pass def __A ( self: Any ) -> Union[str, Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) def __A ( self: int ) -> str: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: List[Any] ) -> Optional[Any]: def check_hidden_states_output(__A: Tuple , __A: List[str] , __A: Optional[Any] ): _A = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__A , __A ) ) _A = outputs.hidden_states _A = 16 self.assertEqual(len(__A ) , __A ) _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(__A , __A , __A ) def __A ( self: Any ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) @slow 
def __A ( self: Union[str, Any] ) -> Optional[int]: for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = MobileNetVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: int ) -> Any: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def __A ( self: Tuple ) -> Union[str, Any]: _A = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): _A = model(**__A ) # verify the logits _A = torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape , __A ) _A = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) ) @slow def __A ( self: Union[str, Any] ) -> Tuple: _A = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) _A = model.to(__A ) _A = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): _A = model(**__A ) _A = outputs.logits # verify the logits _A = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , __A ) _A = torch.tensor( [ [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]], [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]], [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]], ] , device=__A , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 ) )
code_codestyle: 62
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with 
pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
style_context_codestyle: 62
label: 1
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel __A = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } __A = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def __A ( _lowercase , _lowercase=False ): '''simple docstring''' _A ,_A = create_model( '''HTSAT-tiny''' , '''roberta''' , _lowercase , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=_lowercase , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def __A ( _lowercase ): '''simple docstring''' _A = {} _A = R'''.*sequential.(\d+).*''' _A = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _A = key.replace(_lowercase , _lowercase ) if re.match(_lowercase , _lowercase ): # replace sequential layers with list _A = re.match(_lowercase , _lowercase ).group(1 ) _A = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(_lowercase )//3}.linear.""" ) elif re.match(_lowercase , _lowercase ): _A = int(re.match(_lowercase , _lowercase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... _A = 1 if projecton_layer == 0 else 2 _A = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _A = value _A = mixed_qkv.size(0 ) // 3 _A = mixed_qkv[:qkv_dim] _A = mixed_qkv[qkv_dim : qkv_dim * 2] _A = mixed_qkv[qkv_dim * 2 :] _A = query_layer _A = key_layer _A = value_layer else: _A = value return model_state_dict def __A ( _lowercase , _lowercase , _lowercase , _lowercase=False ): '''simple docstring''' _A ,_A = init_clap(_lowercase , enable_fusion=_lowercase ) clap_model.eval() _A = clap_model.state_dict() _A = rename_state_dict(_lowercase ) _A = ClapConfig() _A = enable_fusion _A = ClapModel(_lowercase ) # ignore the spectrogram embedding layer model.load_state_dict(_lowercase , strict=_lowercase ) model.save_pretrained(_lowercase ) transformers_config.save_pretrained(_lowercase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') __A = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
code_codestyle: 62
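Two details in the CLAP conversion script above are worth flagging. First, the fused qkv weights are split into thirds along dimension 0; a standalone sketch of that slice follows (the (3*dim, dim) shape is an assumption about the checkpoint layout). Second, the guard written as `if "audio" and "qkv" in key:` only tests `"qkv" in key`, because the non-empty literal "audio" is always truthy; that looks like a latent bug in the sample.

```python
import torch

def split_fused_qkv(fused: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Slice a fused (3*dim, dim) qkv weight into query, key, and value parts."""
    dim = fused.size(0) // 3
    return fused[:dim], fused[dim : 2 * dim], fused[2 * dim :]

fused = torch.randn(3 * 8, 8)
q, k, v = split_fused_qkv(fused)
assert q.shape == k.shape == v.shape == (8, 8)
assert torch.equal(torch.cat([q, k, v]), fused)
```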
import math def __A ( _lowercase ): '''simple docstring''' _A = [] _A = 2 _A = int(math.sqrt(_lowercase ) ) # Size of every segment _A = [True] * (end + 1) _A = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): _A = False start += 1 prime += in_prime _A = end + 1 _A = min(2 * end , _lowercase ) while low <= n: _A = [True] * (high - low + 1) for each in in_prime: _A = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): _A = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) _A = high + 1 _A = min(high + end , _lowercase ) return prime print(sieve(10**6))
style_context_codestyle: 62
label: 1
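The routine above is a segmented sieve of Eratosthenes: sieve the primes up to sqrt(n) first, then mark composites segment by segment so the working array stays O(sqrt(n)). A compact sketch of the same scheme (hypothetical name):

```python
import math

def segmented_sieve(n: int) -> list[int]:
    """Primes up to n using O(sqrt(n))-sized working segments."""
    limit = math.isqrt(n) + 1
    base = [True] * (limit + 1)
    base[0:2] = [False, False]
    for i in range(2, math.isqrt(limit) + 1):
        if base[i]:
            base[i * i :: i] = [False] * len(base[i * i :: i])
    small_primes = [i for i, is_p in enumerate(base) if is_p]

    primes = list(small_primes)
    low = limit + 1
    while low <= n:
        high = min(low + limit, n)
        segment = [True] * (high - low + 1)
        for p in small_primes:
            start = max(p * p, (low + p - 1) // p * p)  # first multiple of p in [low, high]
            for m in range(start, high + 1, p):
                segment[m - low] = False
        primes.extend(low + i for i, is_p in enumerate(segment) if is_p)
        low = high + 1
    return primes

assert segmented_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```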
from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["torch", "torchsde"] def __init__( self: str , *__A: str , **__A: Optional[int] ) -> List[Any]: requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def __A ( cls: int , *__A: List[Any] , **__A: str ) -> Any: requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def __A ( cls: Dict , *__A: List[str] , **__A: Dict ) -> Dict: requires_backends(cls , ['''torch''', '''torchsde'''] )
code_codestyle: 62
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
style_context_codestyle: 62
label: 1
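The first Flax module above doubles spatial resolution with jax.image.resize(method='nearest') before a 3x3 convolution, the usual upsampling block in diffusion U-Nets. The resize step in isolation, on an NHWC toy tensor:

```python
import jax
import jax.numpy as jnp

x = jnp.arange(4.0).reshape(1, 2, 2, 1)  # (batch, height, width, channels)
up = jax.image.resize(x, shape=(1, 4, 4, 1), method="nearest")
print(up[0, :, :, 0])
# Each source pixel becomes a 2x2 block:
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]
```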
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_lowercase , _lowercase , _lowercase ) order.append(_lowercase ) return order def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_lowercase , _lowercase , _lowercase ) return component def __A ( _lowercase ): '''simple docstring''' _A = len(_lowercase ) * [False] _A = {vert: [] for vert in range(len(_lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_lowercase ) _A = [] for i, was_visited in enumerate(_lowercase ): if not was_visited: order += topology_sort(_lowercase , _lowercase , _lowercase ) _A = [] _A = len(_lowercase ) * [False] for i in range(len(_lowercase ) ): _A = order[len(_lowercase ) - i - 1] if not visited[vert]: _A = find_components(_lowercase , _lowercase , _lowercase ) components_list.append(_lowercase ) return components_list
code_codestyle: 62
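The functions above implement Kosaraju's strongly connected components: one DFS pass records post-order finish times, then a second pass over the reversed graph, taken in decreasing finish time, peels off one SCC per tree. A readable sketch of the two passes (recursive, which is fine for the small module-level example graphs):

```python
def kosaraju_scc(graph: dict[int, list[int]]) -> list[list[int]]:
    """Strongly connected components via two DFS passes (Kosaraju)."""
    reversed_graph = {v: [] for v in graph}
    for v, neighbours in graph.items():
        for u in neighbours:
            reversed_graph[u].append(v)

    order, visited = [], set()

    def post_order(v):
        visited.add(v)
        for u in graph[v]:
            if u not in visited:
                post_order(u)
        order.append(v)

    for v in graph:
        if v not in visited:
            post_order(v)

    components, visited = [], set()

    def collect(v, component):
        visited.add(v)
        component.append(v)
        for u in reversed_graph[v]:
            if u not in visited:
                collect(u, component)

    for v in reversed(order):  # highest finish time first
        if v not in visited:
            component = []
            collect(v, component)
            components.append(component)
    return components

print(kosaraju_scc({0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}))
# [[0, 2, 1], [3, 5, 4]] -- the two SCCs of the second test graph above
```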
def __A ( _lowercase ): '''simple docstring''' _A = [0] * len(_lowercase ) _A = [] _A = [] _A = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(_lowercase ) ): if indegree[i] == 0: queue.append(_lowercase ) while queue: _A = queue.pop(0 ) cnt += 1 topo.append(_lowercase ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(_lowercase ) if cnt != len(_lowercase ): print('''Cycle exists''' ) else: print(_lowercase ) # Adjacency List of Graph __A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
style_context_codestyle: 62
label: 1
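The function above is Kahn's algorithm: repeatedly remove a vertex with in-degree zero, append it to the order, and decrement its neighbours; if fewer than n vertices are removed, the graph has a cycle. The sample pops from the front of a plain list, which costs O(n) per pop; a deque makes it O(1), as in this sketch:

```python
from collections import deque

def topological_sort(graph: dict[int, list[int]]) -> list[int] | None:
    """Kahn's algorithm; returns None if the graph contains a cycle."""
    indegree = {v: 0 for v in graph}
    for neighbours in graph.values():
        for u in neighbours:
            indegree[u] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for u in graph[v]:
            indegree[u] -= 1
            if indegree[u] == 0:
                queue.append(u)
    return order if len(order) == len(graph) else None

print(topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))
# [0, 1, 2, 3, 4, 5], matching the sample's adjacency list
```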
import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging __A = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = CLIPConfig A_ = ["CLIPEncoderLayer"] def __init__( self: Optional[Any] , __A: CLIPConfig ) -> Any: super().__init__(__A ) _A = CLIPVisionModelWithProjection(config.vision_config ) _A = nn.Linear(config.vision_config.projection_dim , 1 ) _A = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def __A ( self: str , __A: Any , __A: Optional[int] , __A: Tuple=0.5 , __A: List[Any]=0.5 ) -> int: _A = self.vision_model(__A )[0] _A = self.p_head(__A ) _A = nsfw_detected.flatten() _A = nsfw_detected > p_threshold _A = nsfw_detected.tolist() if any(__A ): logger.warning( '''Potential NSFW content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, nsfw_detected_ in enumerate(__A ): if nsfw_detected_: _A = np.zeros(images[idx].shape ) _A = self.w_head(__A ) _A = watermark_detected.flatten() _A = watermark_detected > w_threshold _A = watermark_detected.tolist() if any(__A ): logger.warning( '''Potential watermarked content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, watermark_detected_ in enumerate(__A ): if watermark_detected_: _A = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
code_codestyle: 62
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = 1 @register_to_config def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]: # set `betas`, `alphas`, `timesteps` self.set_timesteps(__A ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int: _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(__A ) _A = [] def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__A ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(__A , __A , __A , __A ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor: return sample def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]: _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(__A , 1e-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[str] ) -> Dict: return self.config.num_train_timesteps
style_context_codestyle: 62
label: 1
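The step method above keeps a history of model outputs in self.ets and blends the most recent ones with linear multistep coefficients, ramping from first order up to fourth as history accumulates; (55, -59, 37, -9)/24 is the standard fourth-order Adams-Bashforth rule. The order-ramping blend in isolation (framework-free sketch with a hypothetical name, scalar inputs for clarity):

```python
def multistep_blend(ets: list[float]) -> float:
    """Adams-Bashforth style combination of the most recent model outputs."""
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
```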
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "new-model" if is_tf_available(): class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = NewModelConfig @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def __A ( self: Optional[Any] ) -> Union[str, Any]: _A = '''bert-base-cased''' _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModel.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: Optional[int] ) -> Any: _A = '''bert-base-cased''' _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForPreTraining.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: Dict ) -> List[str]: for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForCausalLM.from_pretrained(__A ) _A ,_A = TFAutoModelForCausalLM.from_pretrained(__A , output_loading_info=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: Tuple ) -> str: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelWithLMHead.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: int ) -> int: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForMaskedLM.from_pretrained(__A ) _A ,_A = TFAutoModelForMaskedLM.from_pretrained(__A , output_loading_info=__A ) 
self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: str ) -> Optional[Any]: for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForSeqaSeqLM.from_pretrained(__A ) _A ,_A = TFAutoModelForSeqaSeqLM.from_pretrained(__A , output_loading_info=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: Union[str, Any] ) -> List[Any]: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForSequenceClassification.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __A ( self: Optional[int] ) -> List[Any]: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForQuestionAnswering.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow @require_tensorflow_probability def __A ( self: Any ) -> str: for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _A = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) _A = TFAutoModelForTableQuestionAnswering.from_pretrained(__A ) _A ,_A = TFAutoModelForTableQuestionAnswering.from_pretrained( __A , output_loading_info=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) def __A ( self: List[str] ) -> Dict: _A = TFAutoModelWithLMHead.from_pretrained(__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 1_44_10 ) def __A ( self: Union[str, Any] ) -> Optional[int]: _A = TFAutoModelWithLMHead.from_pretrained(__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 1_44_10 ) def __A ( self: Optional[Any] ) -> Optional[Any]: # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _A = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(__A , __A ) _A = copy.deepcopy(model.config ) _A = ['''FunnelBaseModel'''] _A = TFAutoModel.from_config(__A ) self.assertIsInstance(__A , __A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__A ) _A = TFAutoModel.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __A ( self: Optional[Any] ) -> int: try: AutoConfig.register('''new-model''' , __A ) _A = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__A ): auto_class.register(__A , __A ) auto_class.register(__A , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): auto_class.register(__A , __A ) # Now that the config is registered, it can be used as any other config with the auto-API _A = BertModelTester(self ).get_config() _A = NewModelConfig(**tiny_config.to_dict() ) _A = auto_class.from_config(__A ) 
self.assertIsInstance(__A , __A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__A ) _A = auto_class.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def __A ( self: Optional[Any] ) -> int: with self.assertRaisesRegex( __A , '''bert-base is not a local folder and is not a valid model identifier''' ): _A = TFAutoModel.from_pretrained('''bert-base''' ) def __A ( self: List[str] ) -> List[Any]: with self.assertRaisesRegex( __A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _A = TFAutoModel.from_pretrained(__A , revision='''aaaaaa''' ) def __A ( self: Tuple ) -> Optional[int]: with self.assertRaisesRegex( __A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): _A = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __A ( self: int ) -> List[Any]: with self.assertRaisesRegex(__A , '''Use `from_pt=True` to load this model''' ): _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def __A ( self: List[str] ) -> List[str]: # Make sure we have cached the model. _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _A = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: _A = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
code_codestyle: 62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A ,_A = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 62
label: 1
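The recursion above counts simple paths from the top-left to the bottom-right of a grid, treating 1-cells as walls and using a visit set that is unwound on backtrack. A self-contained sketch with conventional names:

```python
def count_paths(grid: list[list[int]], row: int = 0, col: int = 0,
                visited: set | None = None) -> int:
    """Count simple paths from (0, 0) to the bottom-right corner; 1 = wall."""
    if visited is None:
        visited = set()
    rows, cols = len(grid), len(grid[0])
    if (row < 0 or col < 0 or row >= rows or col >= cols
            or (row, col) in visited or grid[row][col] == 1):
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visited.add((row, col))
    total = sum(count_paths(grid, r, c, visited)
                for r, c in ((row + 1, col), (row - 1, col),
                             (row, col + 1), (row, col - 1)))
    visited.remove((row, col))  # backtrack so other paths may reuse this cell
    return total

assert count_paths([[0, 0], [0, 0]]) == 2
```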
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 62
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
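# Hedged usage sketch for the parser above (upstream this class is exposed as
# `transformers.HfArgumentParser`, as its own error message indicates; the
# dataclass, its fields, and the example CLI flags below are illustrative
# assumptions, not taken from the original file):
import dataclasses

from transformers import HfArgumentParser


@dataclasses.dataclass
class DemoArguments:
    # no default -> the generated --model_name flag is required
    model_name: str = dataclasses.field(metadata={"help": "Model identifier."})
    learning_rate: float = 3e-5
    do_train: bool = False  # becomes a --do_train / --no_do_train style flag


if __name__ == "__main__":
    parser = HfArgumentParser(DemoArguments)
    # e.g. `python demo.py --model_name bert-base-uncased --do_train`
    (demo_args,) = parser.parse_args_into_dataclasses()
    print(demo_args)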
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'huggingface/informer-tourism-monthly': ( 'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json' ), # See all Informer models at https://huggingface.co/models?filter=informer } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "informer" A_ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self: Union[str, Any] , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "student_t" , __A: str = "nll" , __A: int = 1 , __A: List[int] = None , __A: Optional[Union[str, bool]] = "mean" , __A: int = 0 , __A: int = 0 , __A: int = 0 , __A: int = 0 , __A: Optional[List[int]] = None , __A: Optional[List[int]] = None , __A: int = 64 , __A: int = 32 , __A: int = 32 , __A: int = 2 , __A: int = 2 , __A: int = 2 , __A: int = 2 , __A: bool = True , __A: str = "gelu" , __A: float = 0.05 , __A: float = 0.1 , __A: float = 0.1 , __A: float = 0.1 , __A: float = 0.1 , __A: int = 1_00 , __A: float = 0.02 , __A: List[str]=True , __A: str = "prob" , __A: int = 5 , __A: bool = True , **__A: Optional[Any] , ) -> List[Any]: # time series specific configuration _A = prediction_length _A = context_length or prediction_length _A = distribution_output _A = loss _A = input_size _A = num_time_features _A = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] _A = scaling _A = num_dynamic_real_features _A = num_static_real_features _A = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(__A ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) _A = cardinality else: _A = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(__A ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) _A = embedding_dimension else: _A = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _A = num_parallel_samples # Transformer architecture configuration _A = input_size * len(self.lags_sequence ) + self._number_of_features _A = d_model _A = encoder_attention_heads _A = decoder_attention_heads _A = encoder_ffn_dim _A = decoder_ffn_dim _A = encoder_layers _A = decoder_layers _A = dropout _A = attention_dropout _A = activation_dropout _A = encoder_layerdrop _A = decoder_layerdrop _A = activation_function _A = init_std _A = use_cache # Informer _A = attention_type _A = sampling_factor _A = distil super().__init__(is_encoder_decoder=__A , **__A ) @property def __A ( self: Optional[int] ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
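# Hedged usage sketch: upstream this configuration is exposed as
# `transformers.InformerConfig`; the hyperparameter values below are
# illustrative assumptions, not defaults from the original file.
from transformers import InformerConfig, InformerModel

config = InformerConfig(
    prediction_length=24,     # forecast horizon
    context_length=48,        # encoder window; defaults to prediction_length when omitted
    lags_sequence=[1, 2, 3],  # lagged inputs appended as extra features
    num_time_features=2,
    d_model=32,
    encoder_layers=2,
    decoder_layers=2,
)
model = InformerModel(config)  # randomly initialized model built from the config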
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> 
Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, 
-0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) 
).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
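# Hedged inference sketch for the model exercised by the tests above (the
# checkpoint name and post-processing call follow the public transformers API;
# the local image path is an illustrative assumption):
import torch
from PIL import Image

from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")

image = Image.open("cats.png")  # hypothetical local image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Collapse the per-query mask/class logits into one (height, width) label map.
segmentation = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]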
def solution(num: int = 1_000_000) -> int:
    """Project Euler 14: starting number below `num` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoized chain lengths, keyed by starting number
    for input1 in range(2, num):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
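if __name__ == "__main__":
    # Hedged self-check (added for illustration): 837799 is the published
    # answer to Project Euler problem 14 for starting numbers below one million.
    assert solution(1_000_000) == 837_799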
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str: _A = question_encoder _A = generator _A = self.question_encoder def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict: if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , '''question_encoder_tokenizer''' ) _A = os.path.join(__A , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__A ) self.generator.save_pretrained(__A ) @classmethod def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any: # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _A = kwargs.pop('''config''' , __A ) if config is None: _A = RagConfig.from_pretrained(__A ) _A = AutoTokenizer.from_pretrained( __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) _A = AutoTokenizer.from_pretrained( __A , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__A , generator=__A ) def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int: return self.current_tokenizer(*__A , **__A ) def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict: return self.generator.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple: return self.generator.decode(*__A , **__A ) def __A ( self: Dict ) -> List[str]: _A = self.question_encoder def __A ( self: Union[str, Any] ) -> int: _A = self.generator def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding: warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __A , ) if max_length is None: _A = self.current_tokenizer.model_max_length _A = self( __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _A = self.current_tokenizer.model_max_length _A = self( text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , ) _A = labels['''input_ids'''] return model_inputs
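# Hedged usage sketch (upstream this wrapper is `transformers.RagTokenizer`;
# the checkpoint name is the standard public RAG model and the query text is
# an illustrative assumption):
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# __call__ dispatches to the current sub-tokenizer (the question encoder by default)
inputs = tokenizer("who wrote the origin of species?", return_tensors="pt")
print(inputs["input_ids"].shape)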
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED __A = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } __A = { 'allenai/led-base-16384': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __A ( ): '''simple docstring''' _A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) _A = bs[:] _A = 0 for b in range(2**8 ): if b not in bs: bs.append(_lowercase ) cs.append(2**8 + n ) n += 1 _A = [chr(_lowercase ) for n in cs] return dict(zip(_lowercase , _lowercase ) ) def __A ( _lowercase ): '''simple docstring''' _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] , __A: List[str] , __A: str , __A: Any="replace" , __A: str="<s>" , __A: Optional[int]="</s>" , __A: Optional[Any]="</s>" , __A: Dict="<s>" , __A: Any="<unk>" , __A: int="<pad>" , __A: int="<mask>" , __A: Union[str, Any]=False , **__A: Optional[Any] , ) -> int: _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__A ) _A = {v: k for k, v in self.encoder.items()} _A = errors # how to handle errors in decoding _A = bytes_to_unicode() _A = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[1:-1] _A = [tuple(merge.split() ) for merge in bpe_merges] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = {} _A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def __A ( self: Union[str, Any] ) -> str: return len(self.encoder ) def __A ( self: List[Any] ) -> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self: Union[str, Any] , __A: Union[str, Any] ) -> Tuple: if token in self.cache: return self.cache[token] _A = tuple(__A ) _A = get_pairs(__A ) if not pairs: return token while True: _A = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A ,_A = bigram _A = [] _A = 0 while i < len(__A ): try: _A = word.index(__A , __A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__A ) _A = new_word if len(__A ) == 1: break else: _A = get_pairs(__A ) _A = ''' '''.join(__A ) _A = word return word def __A ( self: Any , __A: int ) -> Dict: _A = [] for token in re.findall(self.pat , __A ): _A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(''' ''' ) ) return bpe_tokens def __A ( self: List[str] , __A: str ) -> List[str]: return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def __A ( self: Optional[Any] , __A: List[str] ) -> List[Any]: return self.decoder.get(__A ) def __A ( self: Union[str, Any] , __A: str ) -> Optional[int]: _A = ''''''.join(__A ) _A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def __A ( self: Union[str, Any] , __A: str , __A: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__A , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + '''\n''' ) _A = 0 with open(__A , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] 
): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__A ) + '''\n''' ) index += 1 return vocab_file, merge_file def __A ( self: List[Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __A ( self: str , __A: List[int] , __A: Optional[List[int]] = None , __A: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def __A ( self: Optional[Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self: Dict , __A: List[Any] , __A: Any=False , **__A: Optional[Any] ) -> Optional[int]: _A = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): _A = ''' ''' + text return (text, kwargs) def __A ( self: List[Any] , __A: Union[Dict[str, EncodedInput], BatchEncoding] , __A: Optional[int] = None , __A: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A: Optional[int] = None , __A: Optional[bool] = None , ) -> dict: _A = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: _A = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _A = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. _A = len(encoded_inputs['''global_attention_mask'''] ) != len(__A ) if needs_to_be_padded: _A = len(__A ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _A = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": _A = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
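# Hedged sketch of the `global_attention_mask` padding behaviour implemented
# in the `_pad` override above: positions introduced by padding are filled
# with -1 (plain local attention), never 0/1. The checkpoint is the public
# LED base model; the sentences are illustrative assumptions.
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
batch = tokenizer(["short text", "a noticeably longer piece of text"], padding=False)
# mark only the first (<s>) token of each sequence as globally attending
batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]
padded = tokenizer.pad(batch, padding="longest")
print(padded["global_attention_mask"])  # the shorter row is right-padded with -1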
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    # binary search: smallest index in v[l+1..r] whose value is >= key
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it can only start a fresh subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
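# Usage sketch for the O(n log n) routine above (the sample array is a
# standard check for this algorithm, added here for illustration):
if __name__ == "__main__":
    example = [2, 5, 3, 7, 11, 8, 10, 13, 6]
    # one longest increasing subsequence is [2, 3, 7, 8, 10, 13]
    assert longest_increasing_subsequence_length(example) == 6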
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = IFPipeline A_ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} A_ = TEXT_TO_IMAGE_BATCH_PARAMS A_ = PipelineTesterMixin.required_optional_params - {"latents"} def __A ( self: List[str] ) -> str: return self._get_dummy_components() def __A ( self: Tuple , __A: Any , __A: Tuple=0 ) -> Union[str, Any]: if str(__A ).startswith('''mps''' ): _A = torch.manual_seed(__A ) else: _A = torch.Generator(device=__A ).manual_seed(__A ) _A = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __A ( self: Any ) -> int: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __A ( self: str ) -> Tuple: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __A ( self: int ) -> Optional[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __A ( self: int ) -> Dict: self._test_save_load_local() def __A ( self: str ) -> List[Any]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __A ( self: int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Union[str, Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self: Any ) -> Union[str, Any]: # if _A = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) _A = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) _A ,_A = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _A = None _A = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(__A , __A , __A , __A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _A = IFImgaImgPipeline(**pipe_a.components ) _A = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(__A , __A , __A , __A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _A = IFInpaintingPipeline(**pipe_a.components ) _A = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(__A , __A , __A , __A ) def __A ( self: Union[str, Any] , __A: List[Any] , __A: List[str] , __A: Union[str, Any] , __A: List[str] ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = pipe_a( prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='''np''' , ) _A = output.images[0] assert image.shape == (64, 64, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(__A , __A ) # pipeline 2 _start_torch_memory_measurement() _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A ) _A = pipe_a( prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='''np''' , ) _A = output.images[0] assert image.shape == (2_56, 2_56, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(__A , __A ) def __A ( self: Tuple , __A: Optional[Any] , __A: List[Any] , __A: int , __A: Union[str, Any] ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A ) _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = pipe_a( prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='''np''' , ) _A = output.images[0] assert image.shape == (64, 64, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(__A , __A ) # pipeline 2 _start_torch_memory_measurement() _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__A ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A ) _A = pipe_a( prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='''np''' , ) _A = output.images[0] assert image.shape == (2_56, 2_56, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(__A , __A ) def __A ( self: Tuple , __A: Any , __A: Union[str, Any] , __A: Dict , __A: Any ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) 
).to(__A ) _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = pipe_a( prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='''np''' , ) _A = output.images[0] assert image.shape == (64, 64, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(__A , __A ) # pipeline 2 _start_torch_memory_measurement() _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A ) _A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__A ) _A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__A ) _A = pipe_a( prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='''np''' , ) _A = output.images[0] assert image.shape == (2_56, 2_56, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(__A , __A ) def __A ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
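# Hedged usage sketch of the two-stage IF setup exercised by the tests above
# (the checkpoints are the public DeepFloyd weights; the prompt is an
# illustrative assumption):
import torch
from diffusers import IFPipeline, IFSuperResolutionPipeline

stage_1 = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
# reuse the stage-1 text encoder results; skip loading T5 a second time
stage_2 = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None
)
stage_1.enable_model_cpu_offload()
stage_2.enable_model_cpu_offload()

prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
image = stage_1(
    prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt"
).images
image = stage_2(
    image=image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    output_type="pil",
).images[0]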
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": 
out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
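# Hedged invocation sketch: --task, --max_seq_length and --gpus come from
# add_model_specific_args above; the script name and the model/data/output
# flags are assumptions about the shared `lightning_base.add_generic_args`
# helper, which is not shown here.
#
#   python run_pl_glue.py \
#       --task mrpc \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC \
#       --output_dir ./results/mrpc \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --do_predict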
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_CAUSAL_LM_MAPPING A_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __A ( self: List[Any] ) -> List[str]: _A = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output _A = text_generator('''This is a test''' , do_sample=__A ) self.assertEqual( __A , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) _A = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __A , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) _A = text_generator('''This is a test''' , do_sample=__A , num_return_sequences=2 , return_tensors=__A ) self.assertEqual( __A , [ {'''generated_token_ids''': ANY(__A )}, {'''generated_token_ids''': ANY(__A )}, ] , ) _A = text_generator.model.config.eos_token_id _A = '''<pad>''' _A = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__A , num_return_sequences=2 , batch_size=2 , return_tensors=__A , ) self.assertEqual( __A , [ [ {'''generated_token_ids''': ANY(__A )}, {'''generated_token_ids''': ANY(__A )}, ], [ {'''generated_token_ids''': ANY(__A )}, {'''generated_token_ids''': ANY(__A )}, ], ] , ) @require_tf def __A ( self: int ) -> List[Any]: _A = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output _A = text_generator('''This is a test''' , do_sample=__A ) self.assertEqual( __A , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) _A = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__A ) self.assertEqual( __A , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __A ( self: Dict , __A: Tuple , __A: Union[str, Any] , __A: List[Any] ) -> Any: _A = TextGenerationPipeline(model=__A , tokenizer=__A ) return text_generator, ["This is a test", "Another test"] def __A ( self: Any ) -> Any: _A = '''Hello I believe in''' _A = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) _A = text_generator(__A ) self.assertEqual( __A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) _A = text_generator(__A , stop_sequence=''' fe''' ) self.assertEqual(__A , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __A ( self: 
int , __A: List[str] , __A: List[str] ) -> str: _A = text_generator.model _A = text_generator.tokenizer _A = text_generator('''This is a test''' ) self.assertEqual(__A , [{'''generated_text''': ANY(__A )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) _A = text_generator('''This is a test''' , return_full_text=__A ) self.assertEqual(__A , [{'''generated_text''': ANY(__A )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) _A = pipeline(task='''text-generation''' , model=__A , tokenizer=__A , return_full_text=__A ) _A = text_generator('''This is a test''' ) self.assertEqual(__A , [{'''generated_text''': ANY(__A )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) _A = text_generator('''This is a test''' , return_full_text=__A ) self.assertEqual(__A , [{'''generated_text''': ANY(__A )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) _A = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__A ) self.assertEqual( __A , [ [{'''generated_text''': ANY(__A )}, {'''generated_text''': ANY(__A )}], [{'''generated_text''': ANY(__A )}, {'''generated_text''': ANY(__A )}], ] , ) if text_generator.tokenizer.pad_token is not None: _A = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__A ) self.assertEqual( __A , [ [{'''generated_text''': ANY(__A )}, {'''generated_text''': ANY(__A )}], [{'''generated_text''': ANY(__A )}, {'''generated_text''': ANY(__A )}], ] , ) with self.assertRaises(__A ): _A = text_generator('''test''' , return_full_text=__A , return_text=__A ) with self.assertRaises(__A ): _A = text_generator('''test''' , return_full_text=__A , return_tensors=__A ) with self.assertRaises(__A ): _A = text_generator('''test''' , return_text=__A , return_tensors=__A ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): _A = text_generator('''''' ) self.assertEqual(__A , [{'''generated_text''': ANY(__A )}] ) else: with self.assertRaises((ValueError, AssertionError) ): _A = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_00 , max_new_tokens=20 ) _A = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__A ): text_generator( '''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __A ( self: Union[str, Any] ) -> Union[str, Any]: import torch # Classic `model_kwargs` _A = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 ) _A = pipe('''This is a test''' ) self.assertEqual( __A , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) _A = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 ) _A = pipe('''This is a test''' ) self.assertEqual( __A , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 _A = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 ) _A = pipe('''This is a test''' ) self.assertEqual( __A , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __A ( self: int ) -> List[str]: import torch _A = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __A ( self: Optional[int] ) -> List[Any]: import torch _A = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 ) pipe('''This is a test''' , do_sample=__A , top_p=0.5 ) def __A ( self: str ) -> int: _A = '''Hello world''' _A = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": _A = logging.get_logger('''transformers.generation.tf_utils''' ) else: _A = logging.get_logger('''transformers.generation.utils''' ) _A = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__A ) as cl: _A = text_generator(__A , max_length=10 , max_new_tokens=1 ) self.assertIn(__A , cl.out ) # The user only sets one -> no warning with
CaptureLogger(__A ) as cl: _A = text_generator(__A , max_new_tokens=1 ) self.assertNotIn(__A , cl.out ) with CaptureLogger(__A ) as cl: _A = text_generator(__A , max_length=10 ) self.assertNotIn(__A , cl.out )
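# --- Illustrative usage sketch (not part of the test suite above) ---
# Minimal example of the text-generation pipeline API exercised by these
# tests; the tiny checkpoint is the same one the tests use, so the output is
# gibberish by design. Prefer `max_new_tokens` over `max_length`: passing
# both triggers the warning checked in the last test.
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     print(generator("Hello I believe in", do_sample=False, max_new_tokens=5))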
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    base_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(base_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
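# Usage sketch (requires network access; changes to IMDb's markup can break
# the tag/class lookups above):
#
#     movies = get_imdb_top_250_movies()
#     print(len(movies), max(movies, key=movies.get))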
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) __A = logging.getLogger() __A = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: Any , __A: Union[str, Any] ) -> Optional[Any]: os.makedirs(__A , exist_ok=__A ) _A = {'''source''': '''What is love ?''', '''target''': '''life'''} _A = {'''train''': 12, '''val''': 2, '''test''': 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: _A = '''\n'''.join([contents[field]] * n_lines[split] ) with open(os.path.join(__A , f"""{split}.{field}""" ) , '''w''' ) as f: f.write(__A ) def __A ( self: Dict , __A: int , __A: str = "pytorch" ) -> Union[str, Any]: _A = self.get_auto_remove_tmp_dir() _A = os.path.join(__A , '''output''' ) _A = os.path.join(__A , '''data''' ) self._create_dummy_data(data_dir=__A ) _A = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append('''--fp16''' ) else: testargs.append('''--gpus=0''' ) testargs.append('''--distributed_backend=ddp_cpu''' ) testargs.append('''--num_processes=2''' ) _A = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__A , env=self.get_env() ) _A = os.path.join(__A , '''metrics.json''' ) with open(__A ) as f: _A = json.load(__A ) return result @require_torch_gpu def __A ( self: Any ) -> Any: _A = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 ) @require_torch_multi_gpu def __A ( self: Optional[Any] ) -> Optional[int]: _A = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 ) @require_torch_gpu @require_ray def __A ( self: str ) -> Tuple: _A = self._run_finetune(gpus=1 , distributed_retriever='''ray''' ) self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 ) @require_torch_multi_gpu @require_ray def __A ( self: List[Any] ) -> Optional[int]: _A = self._run_finetune(gpus=1 , distributed_retriever='''ray''' ) self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
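# Illustrative use of the tokenizer under test, outside the unittest harness
# (same public checkpoint as the integration tests above, which also document
# the lower-cased, space-separated decode behaviour):
#
#     from transformers import BlenderbotSmallTokenizer
#
#     tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
#     ids = tok(["I am a small frog."]).input_ids
#     print(tok.batch_decode(ids, skip_special_tokens=True)[0])  # "i am a small frog ."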
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
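# Example invocation (all three paths are placeholders; the script filename
# is the conventional one for this converter in the transformers repo):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert/model.ckpt-best \
#         --albert_config_file ./albert/albert_config.json \
#         --pytorch_dump_path ./albert/pytorch_model.bin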
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
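# Quick sketch of the two classes above; upstream they are published as
# `RobertaConfig` and `RobertaOnnxConfig` (the values shown are the defaults
# from __init__ above):
#
#     from transformers import RobertaConfig
#     from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig
#
#     config = RobertaConfig()            # vocab_size=50265, hidden_size=768, ...
#     onnx_config = RobertaOnnxConfig(config, task="default")
#     print(onnx_config.inputs)           # OrderedDict of input_ids / attention_mask axes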
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: Optional[Any] ) -> int: _A = tempfile.mkdtemp() _A = 8 # DPR tok _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok _A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _A = {'''unk_token''': '''<unk>'''} _A = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(__A , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: int ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __A ( self: Union[str, Any] ) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __A ( self: int ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __A ( self: Dict ) -> str: shutil.rmtree(self.tmpdirname ) def __A ( self: int ) -> Dict: _A = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __A ( self: Optional[Any] ) -> str: _A = self.get_dummy_dataset() _A = 
RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: _A = dataset _A = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __A ( self: Dict , __A: bool ) -> int: _A = self.get_dummy_dataset() _A = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: _A = os.path.join(self.tmpdirname , '''dataset''' ) _A = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset _A = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: _A = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , ) return retriever def __A ( self: Optional[Any] ) -> List[Any]: _A = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) _A = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) _A = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) _A = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(__A , open(__A , '''wb''' ) ) _A = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) _A = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __A ( self: Tuple ) -> Dict: _A = 1 _A = self.get_dummy_canonical_hf_index_retriever() _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A ,_A ,_A = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __A ( self: Optional[int] ) -> Dict: _A = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: _A = 
self.get_dummy_dataset() retriever.save_pretrained(__A ) _A = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def __A ( self: Any ) -> List[Any]: _A = 1 _A = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A ,_A ,_A = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __A ( self: str ) -> Dict: _A = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) _A = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def __A ( self: str ) -> List[str]: _A = 1 _A = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A ,_A ,_A = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __A ( self: int ) -> Optional[Any]: _A = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) _A = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def __A ( self: int ) -> str: _A = 1 _A = self.get_dummy_legacy_index_retriever() _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A ,_A ,_A = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , __A ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], 
[0]] ) def __A ( self: int ) -> List[Any]: _A = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) _A = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __A ( self: Union[str, Any] ) -> List[Any]: import torch _A = 1 _A = self.get_dummy_canonical_hf_index_retriever() _A = [[5, 7], [10, 11]] _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) _A ,_A ,_A = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , np.ndarray ) _A = retriever( __A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='''pt''' , ) _A ,_A ,_A ,_A = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __A ( self: int ) -> str: _A = self.get_dpr_ctx_encoder_tokenizer() _A = 1 _A = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) retriever.set_ctx_encoder_tokenizer(__A ) _A = [[5, 7], [10, 11]] _A = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) self.assertEqual( len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __A ) # check for doc token related keys in dictionary.
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export finished''' )
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self: Optional[Any] , __A: Optional[Any] , __A: Optional[int]=7 , __A: List[Any]=3 , __A: Tuple=18 , __A: Dict=30 , __A: List[Any]=4_00 , __A: int=True , __A: str=None , __A: List[str]=True , __A: Dict=None , __A: Optional[int]=True , __A: Optional[int]=[0.5, 0.5, 0.5] , __A: str=[0.5, 0.5, 0.5] , ) -> Dict: _A = size if size is not None else {'''shortest_edge''': 18} _A = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_center_crop _A = crop_size _A = do_normalize _A = image_mean _A = image_std def __A ( self: int ) -> List[str]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = LevitImageProcessor if is_vision_available() else None def __A ( self: Dict ) -> Union[str, Any]: _A = LevitImageProcessingTester(self ) @property def __A ( self: Optional[int] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self: Union[str, Any] ) -> Dict: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , '''image_mean''' ) ) self.assertTrue(hasattr(__A , '''image_std''' ) ) self.assertTrue(hasattr(__A , '''do_normalize''' ) ) self.assertTrue(hasattr(__A , '''do_resize''' ) ) self.assertTrue(hasattr(__A , '''do_center_crop''' ) ) self.assertTrue(hasattr(__A , '''size''' ) ) def __A ( self: int ) -> Tuple: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Optional[int] ) -> Optional[Any]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __A ( self: List[str] ) -> Tuple: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __A ( self: List[str] ) -> str: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
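# Standalone sketch of the processor these tests cover, constructed directly
# with the tiny sizes used above (no checkpoint download needed):
#
#     import numpy as np
#     from PIL import Image
#     from transformers import LevitImageProcessor
#
#     processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#     image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
#     pixel_values = processor(image, return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) - crop size decides the output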
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
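# With the lazy structure above, the heavy `modeling_mega` module is only
# imported on first attribute access; the user-facing import stays cheap:
#
#     from transformers import MegaConfig, MegaModel   # resolved lazily
#     config = MegaConfig(hidden_size=64)              # kwargs illustrative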
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_jukebox': [ 'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxVQVAEConfig', ], 'tokenization_jukebox': ['JukeboxTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxVQVAE', 'JukeboxPrior', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    # Upper-case, strip non-letters, separate repeated letters with X's,
    # and pad odd-length input with a trailing X.
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are combined, hence the 25-letter alphabet
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
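# Round-trip demo of the helpers above. Because of the X-insertion and
# padding in prepare_input(), decode() returns the X-padded plaintext rather
# than the literal original message.
if __name__ == "__main__":
    key = "playfair example"
    ciphertext = encode("hide the gold in the tree stump", key)
    print(ciphertext)
    print(decode(ciphertext, key))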
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A = { 'configuration_blip': [ 'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlipConfig', 'BlipTextConfig', 'BlipVisionConfig', ], 'processing_blip': ['BlipProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['BlipImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlipModel', 'BlipPreTrainedModel', 'BlipForConditionalGeneration', 'BlipForQuestionAnswering', 'BlipVisionModel', 'BlipTextModel', 'BlipForImageTextRetrieval', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipForConditionalGeneration', 'TFBlipForQuestionAnswering', 'TFBlipVisionModel', 'TFBlipTextModel', 'TFBlipForImageTextRetrieval', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Fourth-order Improved Pseudo Linear Multistep (F-PNDM) scheduler. See
    https://arxiv.org/pdf/2202.09778.pdf, mainly formulas (9), (12), (13)
    and Algorithm 2.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
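# Usage sketch: this class mirrors `diffusers.IPNDMScheduler`, so the loop
# below also works with the published class. The denoiser here is a stand-in
# lambda-like zero model, not a real diffusion network.
#
#     import torch
#     from diffusers import IPNDMScheduler
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = torch.zeros_like(sample)  # stand-in for a denoiser
#         sample = scheduler.step(model_output, t, sample).prev_sample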
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Order vertices by DFS finishing time, starting from `vert`."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's two-pass algorithm: DFS finishing order on the graph, then
    DFS on the reversed graph in reverse finishing order."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
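# Quick demo on the two sample graphs above: in test_graph_1 the cycle
# 0 -> 2 -> 1 -> 0 forms one component and 3 and 4 are singletons; in
# test_graph_2 the components are {0, 1, 2} and {3, 4, 5}.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))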
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
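# Behaviour sketch of the ring buffer above: a freshly constructed queue is
# "empty" until data is written into the pre-allocated nodes.
#
#     queue = CircularQueueLinkedList(6)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     print(queue.first())    # "a"
#     print(queue.dequeue())  # "a"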
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: _A = mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) else: _A = max( mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) , mf_knapsack(i - 1 , _lowercase , _lowercase , j - wt[i - 1] ) + val[i - 1] , ) _A = val return f[i][j] def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: _A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: _A = dp[i - 1][w_] return dp[n][w_], dp def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not (isinstance(_lowercase , (list, tuple) ) and isinstance(_lowercase , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) _A = len(_lowercase ) if num_items != len(_lowercase ): _A = ( '''The number of weights must be the same as the number of values.\n''' f"""But got {num_items} weights and {len(_lowercase )} values""" ) raise ValueError(_lowercase ) for i in range(_lowercase ): if not isinstance(wt[i] , _lowercase ): _A = ( '''All weights must be integers but got weight of ''' f"""type {type(wt[i] )} at index {i}""" ) raise TypeError(_lowercase ) _A ,_A = knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) _A = set() _construct_solution(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) return optimal_val, example_optional_set def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_lowercase , _lowercase , i - 1 , _lowercase , _lowercase ) else: optimal_set.add(_lowercase ) _construct_solution(_lowercase , _lowercase , i - 1 , j - wt[i - 1] , _lowercase ) if __name__ == "__main__": __A = [3, 2, 4, 4] __A = [4, 3, 2, 3] __A = 4 __A = 6 __A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __A , __A = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __A , __A = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
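The __main__ block already checks one instance; here is a second hand-verified case, assuming the helpers above are restored to working form (the obfuscation broke their internal name bindings):

values, weights = [6, 10, 12], [1, 2, 3]
optimal_value, subset = knapsack_with_example_solution(5, weights, values)
print(optimal_value, subset)  # 22 {2, 3}  (items are reported 1-indexed)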
62
1
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __A ( _lowercase , _lowercase ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _A = JsonDatasetReader(_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase ).read() _check_json_dataset(_lowercase , _lowercase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} _A = features.copy() if features else default_expected_features _A = ( Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) _A = JsonDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read() _check_json_dataset(_lowercase , _lowercase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = tmp_path / '''cache''' _A = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} _A = features.copy() if features else default_expected_features _A = ( Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) _A = JsonDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read() assert isinstance(_lowercase , _lowercase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} _A = features.copy() _A = ( Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) _A = tmp_path / '''cache''' _A = JsonDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read() assert isinstance(_lowercase , _lowercase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == 
expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} _A = JsonDatasetReader(_lowercase , cache_dir=_lowercase , split=_lowercase ).read() _check_json_dataset(_lowercase , _lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if issubclass(_lowercase , _lowercase ): _A = jsonl_path elif issubclass(_lowercase , _lowercase ): _A = [jsonl_path] _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} _A = JsonDatasetReader(_lowercase , cache_dir=_lowercase ).read() _check_json_dataset(_lowercase , _lowercase ) def __A ( _lowercase , _lowercase , _lowercase=("train",) ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) for split in splits: _A = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _A = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_lowercase , keep_in_memory=_lowercase ).read() _check_json_datasetdict(_lowercase , _lowercase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} _A = features.copy() if features else default_expected_features _A = ( Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) _A = JsonDatasetReader({'''train''': jsonl_path} , features=_lowercase , cache_dir=_lowercase ).read() _check_json_datasetdict(_lowercase , _lowercase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if split: _A = {split: jsonl_path} else: _A = '''train''' _A = {'''train''': jsonl_path, '''test''': jsonl_path} _A = tmp_path / '''cache''' _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} _A = JsonDatasetReader(_lowercase , cache_dir=_lowercase ).read() _check_json_datasetdict(_lowercase , _lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __A ( _lowercase ): '''simple docstring''' return json.load(_lowercase ) def __A ( _lowercase ): '''simple docstring''' return 
[json.loads(_lowercase ) for line in buffer] class SCREAMING_SNAKE_CASE : """simple docstring""" @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def __A ( self: Optional[Any] , __A: Tuple , __A: Any , __A: Optional[int] ) -> List[str]: with io.BytesIO() as buffer: JsonDatasetWriter(__A , __A , lines=__A ).write() buffer.seek(0 ) _A = load_json_function(__A ) assert isinstance(__A , __A ) assert isinstance(exported_content[0] , __A ) assert len(__A ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: List[Any] , __A: Union[str, Any] , __A: Optional[int] , __A: List[Any] ) -> Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(__A , __A , lines=__A , orient=__A ).write() buffer.seek(0 ) _A = load_json(__A ) assert isinstance(__A , __A ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__A , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__A ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def __A ( self: int , __A: Union[str, Any] , __A: Union[str, Any] , __A: Optional[Any] ) -> Tuple: with io.BytesIO() as buffer: JsonDatasetWriter(__A , __A , lines=__A , num_proc=2 ).write() buffer.seek(0 ) _A = load_json_function(__A ) assert isinstance(__A , __A ) assert isinstance(exported_content[0] , __A ) assert len(__A ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def __A ( self: Tuple , __A: int , __A: List[str] , __A: List[Any] , __A: List[Any] , __A: List[Any] ) -> Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(__A , __A , lines=__A , orient=__A , num_proc=2 ).write() buffer.seek(0 ) _A = load_json(__A ) assert isinstance(__A , __A ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__A , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__A ) == 10 def __A ( self: Tuple , __A: Optional[int] ) -> Optional[int]: with pytest.raises(__A ): with io.BytesIO() as buffer: JsonDatasetWriter(__A , __A , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def __A ( self: Dict , __A: Tuple , __A: Dict , __A: Tuple , __A: Optional[Any] , __A: int ) -> Optional[Any]: _A = tmp_path_factory.mktemp('''data''' ) / f"""test.json.{extension}""" 
_A = str(shared_datadir / f"""test_file.json.{extension}""" ) JsonDatasetWriter(__A , __A , compression=__A ).write() with fsspec.open(__A , '''rb''' , compression='''infer''' ) as f: _A = f.read() with fsspec.open(__A , '''rb''' , compression='''infer''' ) as f: _A = f.read() assert exported_content == original_content
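A hedged round-trip sketch of the reader/writer pair these tests exercise; Dataset.from_dict stands in for the jsonl fixtures:

import io
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
buf = io.BytesIO()
JsonDatasetWriter(ds, buf, lines=True).write()
buf.seek(0)
print(buf.read().decode())  # one JSON object per line, e.g. {"col_1":"a","col_2":1}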
62
def solution(n: int = 1_000_000) -> int:
    """Return the start below `n` of the longest Collatz chain, memoising
    chain lengths in `counters` as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
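A quick check on a small bound for the restored solution above; 97 is the classic answer for the longest Collatz chain started below 100:

print(solution(100))  # 97 (chain length 119)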
62
1
import qiskit


def half_adder(bit0: int, bit1: int):
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
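Exercising the half adder over all four input pairs; the circuit is deterministic, so with 1000 shots each pair yields a single two-bit count (classical bit 1 is the AND/carry, bit 0 the XOR/sum):

for a in (0, 1):
    for b in (0, 1):
        print(a, b, half_adder(a, b))  # e.g. 1 1 -> {'10': 1000}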
62
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
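A usage sketch, assuming the function above is restored under the illustrative name text_justification(word, max_width) (both parameter names appear in its body); the expected rows follow the classic LeetCode 68 example:

print(text_justification("This is an example of text justification.", 16))
# ['This    is    an', 'example  of text', 'justification.  ']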
62
1
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = ["image_processor", "tokenizer"] A_ = "BlipImageProcessor" A_ = ("BertTokenizer", "BertTokenizerFast") def __init__( self: str , __A: List[Any] , __A: int ) -> Optional[int]: _A = False super().__init__(__A , __A ) _A = self.image_processor def __call__( self: Any , __A: ImageInput = None , __A: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A: bool = True , __A: Union[bool, str, PaddingStrategy] = False , __A: Union[bool, str, TruncationStrategy] = None , __A: Optional[int] = None , __A: int = 0 , __A: Optional[int] = None , __A: Optional[bool] = None , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = True , __A: Optional[Union[str, TensorType]] = None , **__A: int , ) -> BatchEncoding: if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: _A = self.tokenizer _A = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values _A = self.image_processor(__A , return_tensors=__A ) if text is not None: _A = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: _A = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def __A ( self: Optional[int] , *__A: Optional[int] , **__A: Optional[Any] ) -> int: return self.tokenizer.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Any , **__A: List[str] ) -> Union[str, Any]: return self.tokenizer.decode(*__A , **__A ) @property def __A ( self: Tuple ) -> Tuple: _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
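A hedged usage sketch; BlipProcessor is the public transformers name for this image-processor/tokenizer pairing, and the checkpoint id is illustrative:

from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (384, 384))
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(list(inputs.keys()))  # e.g. ['pixel_values', 'input_ids', 'attention_mask']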
62
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with 
pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
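A sketch of the local-path behaviour the surrounding tests pin down: cached_path is a pass-through for files that already exist on disk:

import os
from datasets.utils.file_utils import cached_path

with open("example.txt", "w") as f:
    f.write("Text data.")
print(cached_path(os.path.abspath("example.txt")))  # the same absolute path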
62
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self: Optional[int] , __A: Optional[int] , __A: Optional[Any]=7 , __A: Any=3 , __A: Dict=18 , __A: List[Any]=30 , __A: Union[str, Any]=4_00 , __A: Tuple=True , __A: Dict=None , __A: Optional[int]=True , ) -> Optional[int]: _A = size if size is not None else {'''height''': 18, '''width''': 18} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = apply_ocr def __A ( self: List[Any] ) -> Optional[Any]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __A ( self: Any ) -> Optional[int]: _A = LayoutLMvaImageProcessingTester(self ) @property def __A ( self: Optional[int] ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self: Optional[Any] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , '''do_resize''' ) ) self.assertTrue(hasattr(__A , '''size''' ) ) self.assertTrue(hasattr(__A , '''apply_ocr''' ) ) def __A ( self: Optional[int] ) -> Optional[int]: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def __A ( self: List[str] ) -> Union[str, Any]: pass def __A ( self: Optional[Any] ) -> Tuple: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __A ) self.assertIsInstance(encoding.boxes , __A ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __A ( self: Optional[int] ) -> str: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' 
).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __A ( self: Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __A ( self: str ) -> Optional[Any]: # with apply_OCR = True _A = LayoutLMvaImageProcessor() from datasets import load_dataset _A = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) _A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) _A = image_processing(__A , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', 
'''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 _A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 
5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __A ) self.assertListEqual(encoding.boxes , __A ) # with apply_OCR = False _A = LayoutLMvaImageProcessor(apply_ocr=__A ) _A = image_processing(__A , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
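A hedged usage sketch mirroring the shape assertions above; LayoutLMv3ImageProcessor is the un-obfuscated class name, and the blank image is a placeholder (apply_ocr=True would additionally require pytesseract):

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.new("RGB", (640, 480))
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])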
62
import math


def sieve(n):
    """Segmented sieve: return all primes up to `n` while only keeping
    sqrt(n)-sized boolean segments in memory."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
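A cross-check of the restored segmented sieve against a direct primality test; both sides should agree exactly on a small range:

def is_prime(k):
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

assert sieve(100) == [k for k in range(2, 101) if is_prime(k)]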
62
1
from __future__ import annotations class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: str , __A: int ) -> None: _A = data _A = None _A = None def __A ( _lowercase ): # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def __A ( _lowercase ): '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def __A ( _lowercase ): '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def __A ( ): # Main function for testing. '''simple docstring''' _A = Node(1 ) _A = Node(2 ) _A = Node(3 ) _A = Node(4 ) _A = Node(5 ) _A = Node(6 ) _A = Node(7 ) _A = Node(8 ) _A = Node(9 ) print(is_full_binary_tree(_lowercase ) ) print(depth_of_tree(_lowercase ) ) print('''Tree is: ''' ) display(_lowercase ) if __name__ == "__main__": main()
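A small example, assuming Node is restored with the data/left/right attributes the helpers already reference: a left-leaning chain has depth 3 but is not a full binary tree:

root = Node(1)
root.left = Node(2)
root.left.left = Node(3)
print(depth_of_tree(root))        # 3
print(is_full_binary_tree(root))  # False - node 1 has a left child only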
62
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
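A standalone sketch of the nearest-neighbour 2x upsampling the first block performs before its convolution (shapes are NHWC, as in the module):

import jax
import jax.numpy as jnp

x = jnp.arange(4, dtype=jnp.float32).reshape(1, 2, 2, 1)
up = jax.image.resize(x, shape=(1, 4, 4, 1), method="nearest")
print(up[0, :, :, 0])  # each input pixel repeated as a 2x2 block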
62
1
import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = BigBirdConfig.from_json_file(_lowercase ) print(f"""Building PyTorch model from configuration: {config}""" ) if is_trivia_qa: _A = BigBirdForQuestionAnswering(_lowercase ) else: _A = BigBirdForPreTraining(_lowercase ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(_lowercase , _lowercase , is_trivia_qa=_lowercase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_lowercase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--big_bird_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.' ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
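An illustrative invocation (the script name and paths are placeholders, not verified files):

# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path bigbird.ckpt \
#     --big_bird_config_file bigbird_config.json \
#     --pytorch_dump_path ./bigbird-pytorch \
#     --is_trivia_qa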
62
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove zero in-degree vertices; if not
    every vertex gets removed, the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
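Running the restored sort on a cyclic graph exercises the failure branch; the acyclic module-level graph above prints [0, 1, 2, 3, 4, 5]:

topological_sort({0: [1], 1: [2], 2: [0]})  # prints: Cycle exists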
62
1
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
62
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = 1 @register_to_config def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]: # set `betas`, `alphas`, `timesteps` self.set_timesteps(__A ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int: _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(__A ) _A = [] def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__A ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(__A , __A , __A , __A ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor: return sample def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]: _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(__A , 1e-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[str] ) -> Dict: return self.config.num_train_timesteps
62
1
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __A = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' for attribute in key.split('''.''' ): _A = getattr(_lowercase , _lowercase ) if weight_type is not None: _A = getattr(_lowercase , _lowercase ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = [] _A = fairseq_model.state_dict() _A = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _A = None for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == '''group''' , ) _A = True elif name.split('''.''' )[0] == "proj": _A = fairseq_model.proj _A = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_lowercase )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _lowercase ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "bias" in name: _A = '''bias''' elif "weight" in name: _A = '''weight''' else: _A = None set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _A = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _A = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _A = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _A = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A ,_A = emb.weight.shape _A = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) _A = emb.weight.data return lin_layer def __A ( _lowercase ): '''simple docstring''' with open(_lowercase , '''r''' , encoding='''utf-8''' ) as f: _A = f.readlines() _A = [line.split(''' ''' )[0] for line in lines] _A = len(_lowercase ) _A = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(_lowercase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ): '''simple docstring''' _A = WavaVecaConfig.from_pretrained(_lowercase ) _A = SpeechaTextaConfig.from_pretrained( _lowercase , vocab_size=_lowercase , decoder_layers=_lowercase , do_stable_layer_norm=_lowercase ) _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , ) _A ,_A ,_A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _A = model[0].eval() # set weights for wav2vec2 encoder _A = WavaVecaModel(_lowercase ) _A = recursively_load_weights_wavaveca(model.encoder , _lowercase ) _A = SpeechaTextaForCausalLM(_lowercase ) _A ,_A = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowercase ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _A = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _A = SpeechEncoderDecoderModel(encoder=_lowercase , decoder=_lowercase ) _A = False # add projection layer _A = nn.Parameter(projection_layer.weight ) _A = nn.Parameter(projection_layer.bias ) _A = create_vocab_dict(_lowercase ) with open(os.path.join(_lowercase , '''vocab.json''' ) , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) _A = SpeechaTextaTokenizer(os.path.join(_lowercase , '''vocab.json''' ) ) tokenizer.save_pretrained(_lowercase ) _A = hf_wavavec.config.to_dict() _A = tokenizer.pad_token_id _A = tokenizer.bos_token_id _A = tokenizer.eos_token_id _A = '''speech_to_text_2''' _A = '''wav2vec2''' _A = SpeechEncoderDecoderConfig.from_dict(_lowercase ) hf_wavavec.save_pretrained(_lowercase ) feature_extractor.save_pretrained(_lowercase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( 
'--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __A = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
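An illustrative invocation (the script name and paths are placeholders):

# python convert_wav2vec2_speech2text2_checkpoint.py \
#     --checkpoint_path ./fairseq_checkpoint.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-2-speech2text2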
62
def depth_first_search(grid, row, col, visit):
    """Count the simple paths from (row, col) to the bottom-right corner of
    `grid`, stepping in the four cardinal directions and skipping cells
    marked 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
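On an open 2x2 grid the restored search finds exactly the two simple corner-to-corner paths:

grid = [[0, 0], [0, 0]]
print(depth_first_search(grid, 0, 0, set()))  # 2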
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Any , __A: Tuple , __A: Optional[Any]=13 , __A: List[Any]=32 , __A: str=2 , __A: Optional[int]=3 , __A: Dict=16 , __A: Tuple=[1, 2, 1] , __A: str=[2, 2, 4] , __A: List[str]=2 , __A: Tuple=2.0 , __A: List[str]=True , __A: List[Any]=0.0 , __A: List[Any]=0.0 , __A: int=0.1 , __A: str="gelu" , __A: Optional[Any]=False , __A: str=True , __A: List[str]=0.02 , __A: Dict=1e-5 , __A: Union[str, Any]=True , __A: str=None , __A: List[Any]=True , __A: Optional[Any]=10 , __A: Dict=8 , __A: Optional[int]=["stage1", "stage2", "stage3"] , __A: List[Any]=[1, 2, 3] , ) -> Tuple: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = patch_norm _A = layer_norm_eps _A = initializer_range _A = is_training _A = scope _A = use_labels _A = type_sequence_label_size _A = encoder_stride _A = out_features _A = out_indices def __A ( self: List[Any] ) -> Tuple: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def __A ( self: str ) -> Dict: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def __A ( self: Optional[int] , __A: str , __A: Any , __A: Dict ) -> str: _A = MaskFormerSwinModel(config=__A ) model.to(__A ) model.eval() _A = model(__A ) _A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __A ( self: Optional[Any] , __A: Dict , __A: Any , __A: List[str] ) -> List[Any]: _A = MaskFormerSwinBackbone(config=__A ) model.to(__A ) model.eval() _A = model(__A ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__A ): _A = ['''stem'''] _A = MaskFormerSwinBackbone(config=__A ) def __A ( self: List[str] ) -> Dict: _A = self.prepare_config_and_inputs() _A ,_A ,_A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False A_ = False def __A ( self: Optional[int] ) -> Optional[Any]: _A = MaskFormerSwinModelTester(self ) _A = ConfigTester(self , config_class=__A , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with''' ''' `nn.DataParallel`''' ) ) def __A ( self: Optional[int] ) -> List[Any]: pass def __A ( self: str ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self: int ) -> Optional[Any]: return def __A ( self: Optional[int] ) -> Union[str, Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: Dict ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__A ) @unittest.skip('''Swin does not use inputs_embeds''' ) def __A ( self: Dict ) -> Any: pass @unittest.skip('''Swin does not support feedforward chunking''' ) def __A ( self: Tuple ) -> List[str]: pass def __A ( self: Any ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def __A ( self: Tuple ) -> Optional[int]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' ) def __A ( self: Any ) -> List[Any]: pass @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' ) def __A ( self: Union[str, Any] ) -> Union[str, Any]: pass def __A ( self: Optional[int] , __A: int , __A: Union[str, Any] , __A: List[Any] , __A: Tuple ) -> Any: _A = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__A , __A ) ) _A = outputs.hidden_states _A = getattr( self.model_tester , 
'''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__A ) , __A ) # Swin has a different seq_length _A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __A ( self: Any ) -> Union[str, Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A = True self.check_hidden_states_output(__A , __A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True self.check_hidden_states_output(__A , __A , __A , __A ) def __A ( self: int ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = 3 _A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A = True self.check_hidden_states_output(__A , __A , __A , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True self.check_hidden_states_output(__A , __A , __A , (padded_height, padded_width) ) @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' ) def __A ( self: List[str] ) -> Union[str, Any]: pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def __A ( self: Tuple ) -> Dict: pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def __A ( self: Dict ) -> str: pass def __A ( self: str ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__A: int ): _A = 0 return t def check_equivalence(__A: List[Any] , __A: Optional[Any] , __A: str , __A: List[Any]={} ): with torch.no_grad(): _A = model(**__A , return_dict=__A , **__A ) _A = model(**__A , return_dict=__A , **__A ).to_tuple() def recursive_check(__A: Tuple , __A: Any ): if isinstance(__A , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__A , __A ): recursive_check(__A , __A ) elif isinstance(__A , __A ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(__A , __A ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__A ) , set_nan_tensor_to_zero(__A ) , atol=1e-5 ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(__A ).any()} and `inf`: {torch.isinf(__A )}. 
Dict has""" f""" `nan`: {torch.isnan(__A ).any()} and `inf`: {torch.isinf(__A )}.""" ) , ) recursive_check(__A , __A ) for model_class in self.all_model_classes: _A = model_class(__A ) model.to(__A ) model.eval() _A = self._prepare_for_class(__A , __A ) _A = self._prepare_for_class(__A , __A ) check_equivalence(__A , __A , __A ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) check_equivalence(__A , __A , __A ) _A = self._prepare_for_class(__A , __A ) _A = self._prepare_for_class(__A , __A ) check_equivalence(__A , __A , __A , {'''output_hidden_states''': True} ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) check_equivalence(__A , __A , __A , {'''output_hidden_states''': True} ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase , snake_case ): """simple docstring""" A_ = (MaskFormerSwinBackbone,) if is_torch_available() else () A_ = MaskFormerSwinConfig def __A ( self: Union[str, Any] ) -> int: _A = MaskFormerSwinModelTester(self ) def __A ( self: Tuple ) -> Any: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = inputs_dict['''pixel_values'''].shape[0] for backbone_class in self.all_model_classes: _A = backbone_class(__A ) backbone.to(__A ) backbone.eval() _A = backbone(**__A ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __A ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _A = backbone(**__A , output_hidden_states=__A ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A ,_A ,_A = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _A = backbone(**__A , output_attentions=__A ) self.assertIsNotNone(outputs.attentions )
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
import argparse
import copy


def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to a list of
    [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Return every tour obtained by swapping two interior nodes of `solution`,
    each with its total distance appended as the last element, sorted by distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> 
Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, 
-0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) 
).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of `function` (a string in `variable`) with the Newton-Raphson
    method, starting from `starting_point`."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')

    # Find root of polynomial
    # Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 + 5j)}')

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f'{newton_raphson("log(y) - 1", 2, variable="y")}',
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
    )

    # Find root of cos(x)
    print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
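A brief usage sketch for the composite tokenizer above; it is illustrative only, using the public RAG checkpoint name as an example and assuming the class is exposed as transformers.RagTokenizer:

# Hypothetical usage: encode a question with the question-encoder tokenizer.
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")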
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def __A ( ): '''simple docstring''' _A = torch.nn.Linear(2 , 4 ) _A = torch.optim.AdamW(model.parameters() , lr=1.0 ) _A = torch.optim.lr_scheduler.OneCycleLR(_lowercase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) _A = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) _A = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def __A ( _lowercase ): '''simple docstring''' return (model.weight.abs().sum() + model.bias.abs().sum()).item() def __A ( _lowercase ): '''simple docstring''' _A = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @require_cuda def __A ( self: int ) -> List[str]: _A = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(__A ): _A = Accelerator(cpu=__A ) def __A ( self: Tuple ) -> List[str]: _A = Accelerator() _A = GradientState() assert state.num_steps == 1 _A = 4 assert state.num_steps == 4 assert state.sync_gradients is True _A = False assert state.sync_gradients is False GradientState._reset_state() def __A ( self: int ) -> Union[str, Any]: _A = Accelerator() _A ,_A ,_A ,_A ,_A = create_components() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = accelerator.prepare(__A , __A , __A , __A , __A ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def __A ( self: Optional[int] ) -> Any: _A = Accelerator() _A ,_A ,_A ,_A ,_A = create_components() accelerator.prepare(__A , __A , __A , __A , __A ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def __A ( self: List[str] ) -> List[Any]: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*__A: int , **__A: Any ): pass with patch('''torch.cuda.set_device''' , __A ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): _A = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def __A ( self: Any ) -> Dict: _A = Accelerator() _A ,_A ,_A ,_A ,_A = create_components() accelerator.prepare(__A , __A , __A , __A , __A ) _A = get_signature(__A ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__A ) # make sure random weights don't match load_random_weights(__A ) self.assertTrue(abs(model_signature - get_signature(__A ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(__A ) self.assertTrue(abs(model_signature - get_signature(__A ) ) < 1e-3 ) def __A ( self: List[str] ) -> Dict: _A = 
Accelerator() _A ,_A ,_A ,_A ,_A = create_components() accelerator.prepare(__A , __A , __A , __A , __A ) _A = get_signature(__A ) # saving hook def save_config(__A: Optional[int] , __A: List[Any] , __A: Tuple ): _A = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(__A , '''data.json''' ) , '''w''' ) as f: json.dump(__A , __A ) # loading hook def load_config(__A: List[Any] , __A: List[Any] ): with open(os.path.join(__A , '''data.json''' ) , '''r''' ) as f: _A = json.load(__A ) _A = config['''class_name'''] _A = accelerator.register_save_state_pre_hook(__A ) _A = accelerator.register_load_state_pre_hook(__A ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__A ) # make sure random weights don't match with hooks load_random_weights(__A ) self.assertTrue(abs(model_signature - get_signature(__A ) ) > 1e-3 ) # random class name to verify correct one is loaded _A = '''random''' # make sure loaded weights match with hooks accelerator.load_state(__A ) self.assertTrue(abs(model_signature - get_signature(__A ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__A ) # make sure random weights don't match with hooks removed load_random_weights(__A ) self.assertTrue(abs(model_signature - get_signature(__A ) ) > 1e-3 ) # random class name to verify correct one is loaded _A = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(__A ) self.assertTrue(abs(model_signature - get_signature(__A ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def __A ( self: int ) -> Dict: _A = Accelerator() _A ,_A ,_A ,_A ,_A = create_components() _A = None # This should work _A ,_A ,_A ,_A ,_A ,_A = accelerator.prepare( __A , __A , __A , __A , __A , __A ) self.assertTrue(dummy_obj is None ) def __A ( self: Union[str, Any] ) -> Dict: _A = Accelerator() _A ,_A ,_A ,_A ,_A = create_components() _A = [1, 2, 3] # This should work _A ,_A ,_A ,_A ,_A ,_A = accelerator.prepare( __A , __A , __A , __A , __A , __A ) self.assertEqual( getattr(__A , '''_is_accelerate_prepared''' , __A ) , __A , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(__A , '''_is_accelerate_prepared''' , __A ) , __A , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(__A , '''_is_accelerate_prepared''' , __A ) , __A , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(__A , '''_is_accelerate_prepared''' , __A ) , __A , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(__A , '''_is_accelerate_prepared''' , __A ) , __A , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(__A , '''_is_accelerate_prepared''' , __A ) , __A , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def __A ( self: Union[str, Any] ) -> List[str]: from transformers import AutoModelForCausalLM _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=__A , device_map={'''''': 0} , ) _A = Accelerator() # This should work _A = accelerator.prepare(__A ) @slow @require_bnb def __A ( self: Optional[Any] ) -> 
Dict: from transformers import AutoModelForCausalLM _A = Accelerator() with init_empty_weights(): _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() _A = infer_auto_device_map(__A ) _A = '''cpu''' _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=__A , load_in_abit=__A , llm_inta_enable_fpaa_cpu_offload=__A ) # This should not work and get value error with self.assertRaises(__A ): _A = accelerator.prepare(__A ) @slow @require_bnb @require_multi_gpu def __A ( self: Any ) -> int: from transformers import AutoModelForCausalLM _A = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() _A = infer_auto_device_map(__A ) _A = 1 _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=__A , device_map=__A , ) _A = Accelerator() # This should not work and get value error with self.assertRaises(__A ): _A = accelerator.prepare(__A ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def __A ( self: Tuple ) -> Union[str, Any]: from transformers import AutoModelForCausalLM with init_empty_weights(): _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) _A = infer_auto_device_map(__A ) _A = 1 _A = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=__A , device_map=__A , ) _A = Accelerator() # This should work _A = accelerator.prepare(__A ) @require_cuda def __A ( self: Tuple ) -> Optional[int]: _A = torch.nn.Linear(10 , 10 ) _A = torch.optim.SGD(model.parameters() , lr=0.01 ) _A = Accelerator(cpu=__A ) _A = accelerator.prepare(__A )
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    """Return the smallest index in (l, r] such that v[index] >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element to keep tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
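An illustrative example for the O(n log n) routine above (not part of the original file): the longest strictly increasing subsequence of this list is [2, 3, 7, 18] (or [2, 5, 7, 101]), so the expected length is 4.

print(longest_increasing_subsequence_length([10, 9, 2, 5, 3, 7, 101, 18]))  # -> 4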
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit (exact erf formulation)."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of GeLU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GeLU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves along `axis` and gate
    one half with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
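An illustrative lookup through the registry above (not part of the original file):

import tensorflow as tf  # mirrors the module import above

act = get_tf_activation("gelu_new")
print(act(tf.constant([-1.0, 0.0, 1.0])))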
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": 
out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
1
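For reference, the two GELU variants in the activations file above (the erf form and the tanh approximation) agree to several decimal places. A standalone numeric check, a minimal sketch using only the standard library (no TensorFlow; the function names are illustrative):

import math

def gelu_exact(x: float) -> float:
    # 0.5 * x * (1 + erf(x / sqrt(2))) -- the erf-based form
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x: float) -> float:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))) -- the approximation
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(f"x={v:+.1f}  exact={gelu_exact(v):+.6f}  tanh={gelu_tanh(v):+.6f}")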
def bin_to_octal(bin_string: str) -> str:
    """Convert a string of binary digits to its octal representation.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101")
    '525'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3 (one octal digit per 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
62
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
62
1
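The grouping-by-three conversion above can be cross-checked with the standard library, since int(s, 2) parses binary and format(n, "o") renders octal. A minimal sketch (the helper name is illustrative):

def bin_to_octal_builtin(bin_string: str) -> str:
    # Parse base 2, re-render base 8.
    return format(int(bin_string, 2), "o")

print(bin_to_octal_builtin("1111"))       # 17
print(bin_to_octal_builtin("101010101"))  # 525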
from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = ["image_processor", "feature_extractor"] A_ = "TvltImageProcessor" A_ = "TvltFeatureExtractor" def __init__( self: List[str] , __A: List[Any] , __A: List[str] ) -> List[str]: super().__init__(image_processor=__A , feature_extractor=__A ) _A = image_processor _A = feature_extractor def __call__( self: List[str] , __A: Any=None , __A: Any=None , __A: Optional[int]=None , __A: str=None , __A: List[Any]=False , __A: Tuple=False , *__A: List[str] , **__A: Optional[Any] , ) -> Optional[Any]: if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) _A = None if images is not None: _A = self.image_processor(__A , mask_pixel=__A , *__A , **__A ) if images_mixed is not None: _A = self.image_processor(__A , is_mixed=__A , *__A , **__A ) if audio is not None: _A = self.feature_extractor( __A , *__A , sampling_rate=__A , mask_audio=__A , **__A ) _A = {} if audio is not None: output_dict.update(__A ) if images is not None: output_dict.update(__A ) if images_mixed_dict is not None: output_dict.update(__A ) return output_dict @property def __A ( self: Optional[Any] ) -> int: _A = self.image_processor.model_input_names _A = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
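A minimal sketch of the composite-processor pattern the TVLT processor above follows: one sub-processor per modality, input validation, and a merged output dict. All classes here are toy stand-ins, not the Transformers API:

class ToyImageProcessor:
    def __call__(self, images):
        return {"pixel_values": [img.lower() for img in images]}


class ToyFeatureExtractor:
    def __call__(self, audio):
        return {"audio_values": [sum(clip) for clip in audio]}


class ToyProcessor:
    def __init__(self):
        self.image_processor = ToyImageProcessor()
        self.feature_extractor = ToyFeatureExtractor()

    def __call__(self, images=None, audio=None):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        output_dict = {}
        if audio is not None:
            output_dict.update(self.feature_extractor(audio))
        if images is not None:
            output_dict.update(self.image_processor(images))
        return output_dict


print(ToyProcessor()(images=["IMG"], audio=[[0.1, 0.2]]))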
from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Any , *__A: Union[str, Any] , **__A: Any ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: Dict , *__A: str , **__A: List[Any] ) -> Dict: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Optional[Any] , *__A: List[Any] , **__A: int ) -> Any: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Dict , *__A: int , **__A: Any ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: Tuple , *__A: int , **__A: Any ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Any , *__A: int , **__A: Optional[int] ) -> Any: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: List[Any] , *__A: Union[str, Any] , **__A: Dict ) -> Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: Union[str, Any] , *__A: Dict , **__A: Optional[int] ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Any , *__A: Optional[Any] , **__A: Any ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Union[str, Any] , *__A: str , **__A: Tuple ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: Union[str, Any] , *__A: Optional[Any] , **__A: List[Any] ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: str , *__A: Tuple , **__A: List[Any] ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Union[str, Any] , *__A: List[str] , **__A: Dict ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: List[str] , *__A: Any , **__A: int ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Union[str, Any] , *__A: Union[str, Any] , **__A: Any ) -> int: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: List[str] , *__A: Tuple , **__A: int ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: Dict , *__A: Optional[Any] , **__A: List[str] ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Any , *__A: Dict , **__A: List[Any] ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: List[Any] , *__A: List[str] , **__A: Dict ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: int , *__A: Any , **__A: Optional[Any] ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Union[str, Any] , *__A: Optional[int] , **__A: Tuple ) -> List[Any]: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Union[str, Any] , *__A: Any , **__A: List[str] ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: int , *__A: int , **__A: int ) -> Tuple: requires_backends(cls , ['''flax'''] 
) @classmethod def __A ( cls: Dict , *__A: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Dict , *__A: List[Any] , **__A: Any ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: str , *__A: int , **__A: Optional[Any] ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Tuple , *__A: List[str] , **__A: Optional[int] ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: List[Any] , *__A: Optional[int] , **__A: Union[str, Any] ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: List[Any] , *__A: Any , **__A: List[Any] ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: int , *__A: List[Any] , **__A: int ) -> Any: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: List[str] , *__A: Tuple , **__A: Optional[Any] ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: Union[str, Any] , *__A: str , **__A: Dict ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: Optional[int] , *__A: Optional[Any] , **__A: List[Any] ) -> str: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: List[Any] , *__A: Any , **__A: Any ) -> Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: int , *__A: Tuple , **__A: str ) -> Dict: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: List[str] , *__A: int , **__A: Tuple ) -> Dict: requires_backends(cls , ['''flax'''] ) class SCREAMING_SNAKE_CASE ( metaclass=snake_case ): """simple docstring""" A_ = ["flax"] def __init__( self: Tuple , *__A: str , **__A: Union[str, Any] ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def __A ( cls: int , *__A: List[Any] , **__A: List[str] ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __A ( cls: int , *__A: str , **__A: str ) -> str: requires_backends(cls , ['''flax'''] )
62
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
1
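A minimal sketch of the dummy-object pattern repeated above: when a backend is missing, placeholder classes raise a descriptive ImportError at use time rather than breaking the package import. This is a simplified stand-in; the real requires_backends helper also performs the availability check:

class DummyObject(type):
    # Metaclass so even class-level attribute access fails with a helpful message.
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s) to be installed.")


class FlaxThing(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the {self._backends} backend(s) to be installed.")


try:
    FlaxThing()
except ImportError as err:
    print(err)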
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "efficientformer" def __init__( self: Dict , __A: List[int] = [3, 2, 6, 4] , __A: List[int] = [48, 96, 2_24, 4_48] , __A: List[bool] = [True, True, True, True] , __A: int = 4_48 , __A: int = 32 , __A: int = 4 , __A: int = 7 , __A: int = 5 , __A: int = 8 , __A: int = 4 , __A: float = 0.0 , __A: int = 16 , __A: int = 3 , __A: int = 3 , __A: int = 3 , __A: int = 2 , __A: int = 1 , __A: float = 0.0 , __A: int = 1 , __A: bool = True , __A: bool = True , __A: float = 1e-5 , __A: str = "gelu" , __A: float = 0.02 , __A: float = 1e-12 , __A: int = 2_24 , __A: float = 1e-05 , **__A: int , ) -> None: super().__init__(**__A ) _A = hidden_act _A = hidden_dropout_prob _A = hidden_sizes _A = num_hidden_layers _A = num_attention_heads _A = initializer_range _A = layer_norm_eps _A = patch_size _A = num_channels _A = depths _A = mlp_expansion_ratio _A = downsamples _A = dim _A = key_dim _A = attention_ratio _A = resolution _A = pool_size _A = downsample_patch_size _A = downsample_stride _A = downsample_pad _A = drop_path_rate _A = num_metaad_blocks _A = distillation _A = use_layer_scale _A = layer_scale_init_value _A = image_size _A = batch_norm_eps
62
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export finished''' )
62
1
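A minimal sketch of the calibration loop in the quantization-aware trainer above: enable calibration, stream batches through in eval mode until a fixed sample budget is reached, then freeze the collected statistics. The quantizer below is a toy stand-in for pytorch-quantization:

class ToyQuantizer:
    def __init__(self):
        self.seen = 0
        self.calibrating = False

    def enable_calibration(self):
        self.calibrating = True

    def observe(self, batch):
        if self.calibrating:
            self.seen += len(batch)  # stand-in for collecting activation statistics

    def finish_calibration(self):
        self.calibrating = False


def calibrate(quantizer, batches, calib_num=128):
    quantizer.enable_calibration()
    for batch in batches:
        quantizer.observe(batch)
        if quantizer.seen >= calib_num:
            break
    quantizer.finish_calibration()
    return quantizer.seen


print(calibrate(ToyQuantizer(), [[0] * 32] * 10))  # 128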
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __A ( _lowercase , _lowercase=None ): '''simple docstring''' _A = None if token is not None: _A = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""} _A = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" _A = requests.get(_lowercase , headers=_lowercase ).json() _A = {} try: job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) _A = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(_lowercase ): _A = requests.get(url + f"""&page={i + 2}""" , headers=_lowercase ).json() job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return job_links except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def __A ( _lowercase , _lowercase=None ): '''simple docstring''' _A = None if token is not None: _A = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""} _A = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" _A = requests.get(_lowercase , headers=_lowercase ).json() _A = {} try: artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) _A = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(_lowercase ): _A = requests.get(url + f"""&page={i + 2}""" , headers=_lowercase ).json() artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) return artifacts except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = None if token is not None: _A = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""} _A = requests.get(_lowercase , headers=_lowercase , allow_redirects=_lowercase ) _A = result.headers['''Location'''] _A = requests.get(_lowercase , allow_redirects=_lowercase ) _A = os.path.join(_lowercase , f"""{artifact_name}.zip""" ) with open(_lowercase , '''wb''' ) as fp: fp.write(response.content ) def __A ( _lowercase , _lowercase=None ): '''simple docstring''' _A = [] _A = [] _A = None with zipfile.ZipFile(_lowercase ) as z: for filename in z.namelist(): if not os.path.isdir(_lowercase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(_lowercase ) as f: for line in f: _A = line.decode('''UTF-8''' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _A = line[: line.index(''': ''' )] _A = line[line.index(''': ''' ) + len(''': ''' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ): # `test` is the test method that failed _A = line[len('''FAILED ''' ) :] failed_tests.append(_lowercase ) elif filename == "job_name.txt": _A = line if len(_lowercase ) != len(_lowercase ): raise ValueError( f"""`errors` and `failed_tests` should have the same number of elements. Got {len(_lowercase )} for `errors` """ f"""and {len(_lowercase )} for `failed_tests` instead. 
The test reports in {artifact_zip_path} have some""" ''' problem.''' ) _A = None if job_name and job_links: _A = job_links.get(_lowercase , _lowercase ) # A list with elements of the form (line of error, error, failed test) _A = [x + [y] + [job_link] for x, y in zip(_lowercase , _lowercase )] return result def __A ( _lowercase , _lowercase=None ): '''simple docstring''' _A = [] _A = [os.path.join(_lowercase , _lowercase ) for p in os.listdir(_lowercase ) if p.endswith('''.zip''' )] for p in paths: errors.extend(get_errors_from_single_artifact(_lowercase , job_links=_lowercase ) ) return errors def __A ( _lowercase , _lowercase=None ): '''simple docstring''' _A = Counter() counter.update([x[1] for x in logs] ) _A = counter.most_common() _A = {} for error, count in counts: if error_filter is None or error not in error_filter: _A = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]} _A = dict(sorted(r.items() , key=lambda _lowercase : item[1]["count"] , reverse=_lowercase ) ) return r def __A ( _lowercase ): '''simple docstring''' _A = test.split('''::''' )[0] if test.startswith('''tests/models/''' ): _A = test.split('''/''' )[2] else: _A = None return test def __A ( _lowercase , _lowercase=None ): '''simple docstring''' _A = [(x[0], x[1], get_model(x[2] )) for x in logs] _A = [x for x in logs if x[2] is not None] _A = {x[2] for x in logs} _A = {} for test in tests: _A = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _A = counter.most_common() _A = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _A = sum(error_counts.values() ) if n_errors > 0: _A = {'''count''': n_errors, '''errors''': error_counts} _A = dict(sorted(r.items() , key=lambda _lowercase : item[1]["count"] , reverse=_lowercase ) ) return r def __A ( _lowercase ): '''simple docstring''' _A = '''| no. | error | status |''' _A = '''|-:|:-|:-|''' _A = [header, sep] for error in reduced_by_error: _A = reduced_by_error[error]['''count'''] _A = f"""| {count} | {error[:1_00]} | |""" lines.append(_lowercase ) return "\n".join(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = '''| model | no. of errors | major error | count |''' _A = '''|-:|-:|-:|-:|''' _A = [header, sep] for model in reduced_by_model: _A = reduced_by_model[model]['''count'''] _A ,_A = list(reduced_by_model[model]['''errors'''].items() )[0] _A = f"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(_lowercase ) return "\n".join(_lowercase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') __A = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __A = get_job_links(args.workflow_run_id, token=args.token) __A = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: __A = k.find(' / ') __A = k[index + len(' / ') :] __A = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __A = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __A = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __A = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __A = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __A = reduce_by_error(errors) __A = reduce_by_model(errors) __A = make_github_table(reduced_by_error) __A = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
62
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
1
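A minimal sketch of the aggregation step in the CI scripts above: count identical error messages across job logs with a Counter and attach the failing tests, the same shape of report that reduce_by_error builds (the sample data is made up):

from collections import Counter

logs = [
    ("error line 1", "AssertionError", "test_a"),
    ("error line 9", "OSError", "test_b"),
    ("error line 3", "AssertionError", "test_c"),
]
counter = Counter(error for _, error, _ in logs)
report = {
    error: {"count": n, "failed_tests": [t for _, e, t in logs if e == error]}
    for error, n in counter.most_common()
}
print(report)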
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __A = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self: Tuple , __A: str , __A: Dict=7 , __A: Tuple=3 , __A: Dict=18 , __A: Dict=30 , __A: Tuple=4_00 , __A: Optional[int]=None , __A: List[Any]=True , __A: int=True , __A: Optional[int]=None , ) -> str: _A = size if size is not None else {'''height''': 20, '''width''': 20} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = size _A = do_normalize _A = do_convert_rgb _A = [5_12, 10_24, 20_48, 40_96] _A = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def __A ( self: Tuple ) -> Optional[Any]: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __A ( self: str ) -> List[str]: _A = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' _A = Image.open(requests.get(__A , stream=__A ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , ) @require_torch @require_vision class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = PixaStructImageProcessor if is_vision_available() else None def __A ( self: Optional[int] ) -> Any: _A = PixaStructImageProcessingTester(self ) @property def __A ( self: List[Any] ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self: Optional[int] ) -> Union[str, Any]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , '''do_normalize''' ) ) self.assertTrue(hasattr(__A , '''do_convert_rgb''' ) ) def __A ( self: Any ) -> Union[str, Any]: _A = self.image_processor_tester.prepare_dummy_image() _A = self.image_processing_class(**self.image_processor_dict ) _A = 20_48 _A = image_processor(__A , return_tensors='''pt''' , max_patches=__A ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) ) def __A ( self: Optional[int] ) -> List[str]: # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( __A , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , 
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __A ( self: Any ) -> Tuple: # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 _A = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__A ): _A = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__A ).flattened_patches _A = '''Hello''' _A = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__A , header_text=__A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( __A , return_tensors='''pt''' , max_patches=__A , header_text=__A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __A ( self: Optional[int] ) -> Any: # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) _A = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( __A , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __A ( self: Tuple ) -> str: # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( __A , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = PixaStructImageProcessor if is_vision_available() else None def __A ( self: Optional[int] ) -> List[str]: _A = PixaStructImageProcessingTester(self , num_channels=4 ) _A = 3 @property def __A ( self: List[Any] ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self: str ) -> Dict: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , '''do_normalize''' ) ) self.assertTrue(hasattr(__A , '''do_convert_rgb''' ) ) def __A ( self: List[Any] ) -> str: # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( __A , return_tensors='''pt''' , max_patches=__A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters, and separate repeated letters with X."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
62
1
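A small check of the flattened-patch width asserted throughout the image-processor tests above: each patch contributes patch_height * patch_width * num_channels values, plus 2 extra slots for the patch's (row, column) coordinates:

patch_h, patch_w, channels = 16, 16, 3
expected_hidden_dim = patch_h * patch_w * channels + 2
print(expected_hidden_dim)  # 770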
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "Salesforce/blip-image-captioning-base" A_ = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) A_ = "image_captioner" A_ = AutoModelForVisionaSeq A_ = ["image"] A_ = ["text"] def __init__( self: str , *__A: str , **__A: Optional[Any] ) -> List[Any]: requires_backends(self , ['''vision'''] ) super().__init__(*__A , **__A ) def __A ( self: Union[str, Any] , __A: "Image" ) -> int: return self.pre_processor(images=__A , return_tensors='''pt''' ) def __A ( self: List[str] , __A: List[Any] ) -> Dict: return self.model.generate(**__A ) def __A ( self: List[Any] , __A: Optional[int] ) -> List[str]: return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0].strip()
62
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
62
1
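A minimal sketch of the three-stage tool pattern the image-captioning tool above implements: encode (pre-process), forward (model call), decode (post-process), chained by __call__. The stages here are toy stand-ins:

class ToyCaptionTool:
    def encode(self, image: str) -> dict:
        return {"pixels": image.lower()}  # stand-in for the pre-processor

    def forward(self, inputs: dict) -> list[str]:
        return inputs["pixels"].split()  # stand-in for model.generate

    def decode(self, outputs: list[str]) -> str:
        return " ".join(outputs).strip()  # stand-in for batch_decode

    def __call__(self, image: str) -> str:
        return self.decode(self.forward(self.encode(image)))


print(ToyCaptionTool()("A Dog On Grass"))  # a dog on grass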
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with 
pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
62
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_lowercase , _lowercase , _lowercase ) order.append(_lowercase ) return order def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_lowercase , _lowercase , _lowercase ) return component def __A ( _lowercase ): '''simple docstring''' _A = len(_lowercase ) * [False] _A = {vert: [] for vert in range(len(_lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_lowercase ) _A = [] for i, was_visited in enumerate(_lowercase ): if not was_visited: order += topology_sort(_lowercase , _lowercase , _lowercase ) _A = [] _A = len(_lowercase ) * [False] for i in range(len(_lowercase ) ): _A = order[len(_lowercase ) - i - 1] if not visited[vert]: _A = find_components(_lowercase , _lowercase , _lowercase ) components_list.append(_lowercase ) return components_list
62
1
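The second blob in the row above implements Kosaraju's algorithm for strongly connected components: one DFS records a post-order, every edge is reversed, and a second DFS in reverse post-order peels off one component at a time. A minimal readable sketch of the same scheme (function and variable names here are my own, not the blob's):

def kosaraju_scc(graph):
    # graph: {vertex: [neighbours]} with vertices numbered 0..n-1
    visited = [False] * len(graph)
    order = []

    def dfs(v):
        visited[v] = True
        for w in graph[v]:
            if not visited[w]:
                dfs(w)
        order.append(v)  # post-order: v lands after everything reachable from it

    for v in graph:
        if not visited[v]:
            dfs(v)

    # reverse every edge
    reversed_graph = {v: [] for v in graph}
    for v, neighbours in graph.items():
        for w in neighbours:
            reversed_graph[w].append(v)

    visited = [False] * len(graph)
    components = []

    def collect(v, component):
        visited[v] = True
        component.append(v)
        for w in reversed_graph[v]:
            if not visited[w]:
                collect(w, component)

    for v in reversed(order):  # sweep in reverse post-order
        if not visited[v]:
            component = []
            collect(v, component)
            components.append(component)
    return components

assert kosaraju_scc({0: [1], 1: [2], 2: [0, 3], 3: []}) == [[0, 2, 1], [3]]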
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: _A = mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) else: _A = max( mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) , mf_knapsack(i - 1 , _lowercase , _lowercase , j - wt[i - 1] ) + val[i - 1] , ) _A = val return f[i][j] def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: _A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: _A = dp[i - 1][w_] return dp[n][w_], dp def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not (isinstance(_lowercase , (list, tuple) ) and isinstance(_lowercase , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) _A = len(_lowercase ) if num_items != len(_lowercase ): _A = ( '''The number of weights must be the same as the number of values.\n''' f"""But got {num_items} weights and {len(_lowercase )} values""" ) raise ValueError(_lowercase ) for i in range(_lowercase ): if not isinstance(wt[i] , _lowercase ): _A = ( '''All weights must be integers but got weight of ''' f"""type {type(wt[i] )} at index {i}""" ) raise TypeError(_lowercase ) _A ,_A = knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) _A = set() _construct_solution(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) return optimal_val, example_optional_set def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_lowercase , _lowercase , i - 1 , _lowercase , _lowercase ) else: optimal_set.add(_lowercase ) _construct_solution(_lowercase , _lowercase , i - 1 , j - wt[i - 1] , _lowercase ) if __name__ == "__main__": __A = [3, 2, 4, 4] __A = [4, 3, 2, 3] __A = 4 __A = 6 __A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __A , __A = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __A , __A = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
62
1
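The knapsack blob in the row above carries a memoised and a bottom-up 0-1 knapsack plus solution reconstruction. The same recurrence fits in a one-dimensional table; a sketch, assuming I am reading the blob's example as values [3, 2, 4, 4] and weights [4, 3, 2, 3] with capacity 6:

def knapsack_01(capacity, weights, values):
    # dp[w] = best value achievable with remaining capacity w
    dp = [0] * (capacity + 1)
    for wt, val in zip(weights, values):
        # sweep right-to-left so each item is taken at most once
        for w in range(capacity, wt - 1, -1):
            dp[w] = max(dp[w], dp[w - wt] + val)
    return dp[capacity]

assert knapsack_01(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8  # items 3 and 4, as the blob asserts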
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
def __A ( _lowercase = 1_00_00_00 ): '''simple docstring''' _A = 1 _A = 1 _A = {1: 1} for inputa in range(2 , _lowercase ): _A = 0 _A = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: _A = (3 * number) + 1 counter += 1 if inputa not in counters: _A = counter if counter > pre_counter: _A = inputa _A = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
62
1
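The Collatz blob in the row above is the Project Euler 14 solution: find the start below one million with the longest 3n+1 chain, memoising chain lengths along the way. A sketch of the same idea with functools.lru_cache doing the memoisation instead of the blob's hand-rolled dictionary:

from functools import lru_cache

@lru_cache(maxsize=None)
def chain_length(n):
    # number of terms in the Collatz sequence from n down to 1, inclusive
    if n == 1:
        return 1
    return 1 + chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)

def solution(limit=1_000_000):
    return max(range(1, limit), key=chain_length)

assert chain_length(13) == 10  # 13 40 20 10 5 16 8 4 2 1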
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
62
1
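The justification blob in the row above hands leftover spaces out round-robin from the left, so earlier gaps receive the extra space. The distribution step in isolation (distribute_spaces is a stand-in name of mine, not a function from the blob):

def distribute_spaces(words, max_width):
    if len(words) == 1:  # a single word is padded on the right
        return words[0] + " " * (max_width - len(words[0]))
    gaps = len(words) - 1
    total_spaces = max_width - sum(len(w) for w in words)
    base, extra = divmod(total_spaces, gaps)
    pieces = []
    for i, word in enumerate(words[:-1]):
        # the first `extra` gaps receive one space more than the rest
        pieces.append(word + " " * (base + (1 if i < extra else 0)))
    pieces.append(words[-1])
    return "".join(pieces)

assert distribute_spaces(["This", "is", "an"], 16) == "This    is    an"
assert len(distribute_spaces(["example"], 16)) == 16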
from typing import Any class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: str , __A: Any ) -> Optional[Any]: _A = data _A = None def __repr__( self: int ) -> str: return f"""Node({self.data})""" class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int ) -> Any: _A = None def __iter__( self: List[str] ) -> Any: _A = self.head while node: yield node.data _A = node.next def __len__( self: int ) -> int: return sum(1 for _ in self ) def __repr__( self: Optional[int] ) -> str: return "->".join([str(__A ) for item in self] ) def __getitem__( self: int , __A: int ) -> Any: if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self: Tuple , __A: int , __A: Any ) -> None: if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) _A = self.head for _ in range(__A ): _A = current.next _A = data def __A ( self: int , __A: Any ) -> None: self.insert_nth(len(self ) , __A ) def __A ( self: List[str] , __A: Any ) -> None: self.insert_nth(0 , __A ) def __A ( self: str , __A: int , __A: Any ) -> None: if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) _A = Node(__A ) if self.head is None: _A = new_node elif index == 0: _A = self.head # link new_node to head _A = new_node else: _A = self.head for _ in range(index - 1 ): _A = temp.next _A = temp.next _A = new_node def __A ( self: Union[str, Any] ) -> None: # print every node data print(self ) def __A ( self: Dict ) -> Any: return self.delete_nth(0 ) def __A ( self: Tuple ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def __A ( self: Optional[Any] , __A: int = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) _A = self.head # default first node if index == 0: _A = self.head.next else: _A = self.head for _ in range(index - 1 ): _A = temp.next _A = temp.next _A = temp.next.next return delete_node.data def __A ( self: Any ) -> bool: return self.head is None def __A ( self: Any ) -> None: _A = None _A = self.head while current: # Store the current node's next node. _A = current.next # Make the current node's next point backwards _A = prev # Make the previous node be the current node _A = current # Make the current node the next node (to progress iteration) _A = next_node # Return prev in order to put the head at the end _A = prev def __A ( ): '''simple docstring''' _A = LinkedList() assert linked_list.is_empty() is True assert str(_lowercase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(_lowercase ) == i linked_list.insert_nth(_lowercase , i + 1 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowercase ) == 9 assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): _A = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) ) def __A ( ): '''simple docstring''' _A = [ -9, 1_00, Node(77_34_51_12 ), '''dlrow olleH''', 7, 55_55, 0, -1_92.5_55_55, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] _A = LinkedList() for i in test_input: linked_list.insert_tail(_lowercase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head _A = linked_list.delete_head() assert result == -9 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail _A = linked_list.delete_tail() assert result == 12.2 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list _A = linked_list.delete_nth(10 ) assert result is None assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowercase ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowercase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def __A ( ): '''simple docstring''' from doctest import testmod testmod() _A = LinkedList() linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(_lowercase ) print('''\nReading/changing Node data using indexing:''' ) print(f"""Element at Position 1: {linked_list[1]}""" ) 
_A = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(_lowercase ) print(f"""length of linked_list is : {len(_lowercase )}""" ) if __name__ == "__main__": main()
62
62
1
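The linked-list blob in the row above includes an in-place reverse; the pointer rotation is the whole trick. Extracted into a standalone sketch (this Node is a minimal stand-in, not the blob's class):

class Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node

def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point this node backwards
        prev = current            # prev walks one step forward
        current = next_node       # and so does current
    return prev                   # the old tail is the new head

head = Node(1, Node(2, Node(3)))
head = reverse(head)
seen = []
while head:
    seen.append(head.data)
    head = head.next
assert seen == [3, 2, 1]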
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
62
import math def __A ( _lowercase ): '''simple docstring''' _A = [] _A = 2 _A = int(math.sqrt(_lowercase ) ) # Size of every segment _A = [True] * (end + 1) _A = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): _A = False start += 1 prime += in_prime _A = end + 1 _A = min(2 * end , _lowercase ) while low <= n: _A = [True] * (high - low + 1) for each in in_prime: _A = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): _A = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) _A = high + 1 _A = min(high + end , _lowercase ) return prime print(sieve(10**6))
62
1
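The sieve blob in the row above is a segmented Sieve of Eratosthenes: it sieves primes up to sqrt(n) first, then marks composites window by window so only O(sqrt n) cells are live at a time. For reference, the plain sieve it builds on (a sketch of mine, not the blob's code):

import math

def simple_sieve(n):
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, math.isqrt(n) + 1):
        if is_prime[p]:
            # start at p*p: smaller multiples were already hit by smaller primes
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, prime in enumerate(is_prime) if prime]

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]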
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig __A = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring __A = 'UperNetConfig' class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self: Tuple , __A: int , __A: int , __A: Union[int, Tuple[int, int]] , __A: Union[int, Tuple[int, int], str] = 0 , __A: bool = False , __A: Union[int, Tuple[int, int]] = 1 , ) -> None: super().__init__() _A = nn.Convad( in_channels=__A , out_channels=__A , kernel_size=__A , padding=__A , bias=__A , dilation=__A , ) _A = nn.BatchNormad(__A ) _A = nn.ReLU() def __A ( self: List[str] , __A: torch.Tensor ) -> torch.Tensor: _A = self.conv(__A ) _A = self.batch_norm(__A ) _A = self.activation(__A ) return output class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self: List[str] , __A: int , __A: int , __A: int ) -> None: super().__init__() _A = [ nn.AdaptiveAvgPoolad(__A ), UperNetConvModule(__A , __A , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(__A ) , __A ) def __A ( self: int , __A: torch.Tensor ) -> torch.Tensor: _A = input for layer in self.layers: _A = layer(__A ) return hidden_state class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self: int , __A: Tuple[int, ...] , __A: int , __A: int , __A: bool ) -> None: super().__init__() _A = pool_scales _A = align_corners _A = in_channels _A = channels _A = [] for i, pool_scale in enumerate(__A ): _A = UperNetPyramidPoolingBlock(pool_scale=__A , in_channels=__A , channels=__A ) self.blocks.append(__A ) self.add_module(str(__A ) , __A ) def __A ( self: List[Any] , __A: torch.Tensor ) -> List[torch.Tensor]: _A = [] for ppm in self.blocks: _A = ppm(__A ) _A = nn.functional.interpolate( __A , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners ) ppm_outs.append(__A ) return ppm_outs class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self: Optional[Any] , __A: int , __A: Optional[int] ) -> Optional[int]: super().__init__() _A = config _A = config.pool_scales # e.g. 
(1, 2, 3, 6) _A = in_channels _A = config.hidden_size _A = False _A = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module _A = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) _A = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module _A = nn.ModuleList() _A = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer _A = UperNetConvModule(__A , self.channels , kernel_size=1 ) _A = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(__A ) self.fpn_convs.append(__A ) _A = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def __A ( self: List[str] ) -> List[str]: self.apply(self._init_weights ) def __A ( self: List[str] , __A: Union[str, Any] ) -> Optional[int]: if isinstance(__A , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __A ( self: Dict , __A: Tuple ) -> Any: _A = inputs[-1] _A = [x] psp_outs.extend(self.psp_modules(__A ) ) _A = torch.cat(__A , dim=1 ) _A = self.bottleneck(__A ) return output def __A ( self: Dict , __A: torch.Tensor ) -> torch.Tensor: # build laterals _A = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(__A ) ) # build top-down path _A = len(__A ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _A = laterals[i - 1].shape[2:] _A = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=__A , mode='''bilinear''' , align_corners=self.align_corners ) # build outputs _A = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _A = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners ) _A = torch.cat(__A , dim=1 ) _A = self.fpn_bottleneck(__A ) _A = self.classifier(__A ) return output class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self: str , __A: str , __A: int = 2 , __A: int = 3 , __A: Union[int, Tuple[int, int]] = 1 ) -> None: super().__init__() _A = config _A = config.auxiliary_in_channels _A = config.auxiliary_channels _A = config.auxiliary_num_convs _A = config.auxiliary_concat_input _A = in_index _A = (kernel_size // 2) * dilation _A = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=__A , padding=__A , dilation=__A ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=__A , padding=__A , dilation=__A ) ) if self.num_convs == 0: _A = nn.Identity() else: _A = nn.Sequential(*__A ) if self.concat_input: _A = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=__A , padding=kernel_size // 2 ) _A = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def __A ( self: Tuple ) -> Optional[Any]: self.apply(self._init_weights ) def __A ( self: Tuple , __A: List[Any] ) -> str: if isinstance(__A , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __A ( self: List[Any] , __A: torch.Tensor ) -> torch.Tensor: # just take the relevant 
feature maps _A = encoder_hidden_states[self.in_index] _A = self.convs(__A ) if self.concat_input: _A = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) _A = self.classifier(__A ) return output class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = UperNetConfig A_ = "pixel_values" A_ = True def __A ( self: List[Any] , __A: Optional[int] ) -> str: if isinstance(__A , __A ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def __A ( self: Any ) -> int: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def __A ( self: int , __A: Tuple , __A: Dict=False ) -> Optional[int]: if isinstance(__A , __A ): _A = value __A = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __A = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." 
, snake_case , ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: List[Any] , __A: str ) -> List[Any]: super().__init__(__A ) _A = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) _A = UperNetHead(__A , in_channels=self.backbone.channels ) _A = UperNetFCNHead(__A ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=__A , config_class=_CONFIG_FOR_DOC ) def __A ( self: List[Any] , __A: Optional[torch.Tensor] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , __A: Optional[torch.Tensor] = None , __A: Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]: _A = return_dict if return_dict is not None else self.config.use_return_dict _A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _A = output_attentions if output_attentions is not None else self.config.output_attentions _A = self.backbone.forward_with_filtered_kwargs( __A , output_hidden_states=__A , output_attentions=__A ) _A = outputs.feature_maps _A = self.decode_head(__A ) _A = nn.functional.interpolate(__A , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__A ) _A = None if self.auxiliary_head is not None: _A = self.auxiliary_head(__A ) _A = nn.functional.interpolate( __A , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__A ) _A = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss _A = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) _A = loss_fct(__A , __A ) _A = loss_fct(__A , __A ) _A = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: _A = (logits,) + outputs[1:] else: _A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=__A , logits=__A , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
62
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
62
1
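The UperNet head in the row above fuses backbone features FPN-style: each coarser lateral is upsampled bilinearly and added into the finer level below it. The top-down loop on its own, with illustrative shapes (a sketch, not the model's code):

import torch
import torch.nn.functional as F

def top_down_merge(laterals):
    # laterals[i]: (N, C, H_i, W_i), resolution halving as i grows
    merged = list(laterals)
    for i in range(len(merged) - 1, 0, -1):
        merged[i - 1] = merged[i - 1] + F.interpolate(
            merged[i], size=merged[i - 1].shape[2:], mode="bilinear", align_corners=False
        )
    return merged

feats = [torch.randn(1, 8, s, s) for s in (32, 16, 8, 4)]
assert [t.shape[-1] for t in top_down_merge(feats)] == [32, 16, 8, 4]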
from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split __A = datasets.load_iris() __A = np.array(data['data']) __A = np.array(data['target']) __A = data['target_names'] __A , __A , __A , __A = train_test_split(X, y) def __A ( _lowercase , _lowercase ): '''simple docstring''' return np.linalg.norm(np.array(_lowercase ) - np.array(_lowercase ) ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=5 ): '''simple docstring''' _A = zip(_lowercase , _lowercase ) # List of distances of all points from the point to be classified _A = [] for data_point in data: _A = euclidean_distance(data_point[0] , _lowercase ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. _A = [i[1] for i in sorted(_lowercase )[:k]] # Most commonly occurring class among them # is the class into which the point is classified _A = Counter(_lowercase ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
62
def __A ( _lowercase ): '''simple docstring''' _A = [0] * len(_lowercase ) _A = [] _A = [] _A = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(_lowercase ) ): if indegree[i] == 0: queue.append(_lowercase ) while queue: _A = queue.pop(0 ) cnt += 1 topo.append(_lowercase ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(_lowercase ) if cnt != len(_lowercase ): print('''Cycle exists''' ) else: print(_lowercase ) # Adjacency List of Graph __A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
62
1
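The k-NN blob in the row above classifies a query by majority vote among its k nearest training points under Euclidean distance. The same idea without sklearn (a sketch; the toy data is mine):

import math
from collections import Counter

def knn_classify(train, point, k=5):
    # train: list of (feature_vector, label) pairs
    nearest = sorted(train, key=lambda sample: math.dist(sample[0], point))[:k]
    votes = Counter(label for _, label in nearest)
    return votes.most_common(1)[0][0]

train = [([0.0, 0.0], "a"), ([0.1, 0.2], "a"), ([5.0, 5.0], "b"), ([5.1, 4.9], "b")]
assert knn_classify(train, [0.2, 0.1], k=3) == "a"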
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = 1 @register_to_config def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]: # set `betas`, `alphas`, `timesteps` self.set_timesteps(__A ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int: _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(__A ) _A = [] def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__A ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(__A , __A , __A , __A ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor: return sample def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]: _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(__A , 1e-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[str] ) -> Dict: return self.config.num_train_timesteps
62
1
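The scheduler blob in the row above is an IPNDM sampler: it buffers the last four model outputs in self.ets and blends them with fourth-order linear-multistep coefficients, dropping to lower orders while the buffer warms up. The coefficient ladder by itself:

def multistep_blend(ets):
    # ets: most recent model outputs, oldest first; effective order grows 1 -> 4
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

# each order's coefficients sum to 1, so constant outputs pass through unchanged
assert multistep_blend([1.0, 1.0, 1.0, 1.0]) == 1.0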
from __future__ import annotations __A = 1.6021e-19 # units = C def __A ( _lowercase , _lowercase , _lowercase , ): '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A ,_A = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
62
1
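The carrier blob in the row above solves the conductivity relation sigma = q * n * mu for whichever quantity was passed as zero. A worked check for the electron-concentration branch (the copper figures below are textbook values I am supplying, not from the blob):

ELECTRON_CHARGE = 1.6021e-19  # coulombs

def electron_conc(conductivity, mobility):
    # n = sigma / (q * mu)
    return conductivity / (mobility * ELECTRON_CHARGE)

# copper: sigma ~ 5.96e7 S/m, mu ~ 4.3e-3 m^2/(V*s)
n = electron_conc(5.96e7, 4.3e-3)
assert 8e28 < n < 9e28  # ~8.7e28 electrons per m^3, the familiar copper figure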
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def __A ( _lowercase ): '''simple docstring''' _A = os.path.join(args.tf_model_dir , '''parameters.json''' ) _A = json.loads(open(_lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith('''.pt''' ): _A = args.output + '''.pt''' _A = OrderedDict() with tf.device('''/CPU:0''' ): _A = tf.train.load_checkpoint(args.tf_model_dir ) _A = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _A = reader.get_tensor(_lowercase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): _A = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): _A = 8 _A = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name.startswith('''model/moe''' ): _A = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): _A = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name.endswith('''/softmlp/kernel''' ): _A = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): _A = key_name[-9:-7] for i in range(16 ): _A = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) _A = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _A = torch.tensor(_lowercase ) elif key_name.startswith('''model/mlp''' ): _A = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): _A = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name.endswith('''/p1/bias''' ): _A = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) elif key_name.endswith('''/p2/kernel''' ): _A = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name.endswith('''/p2/bias''' ): _A = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) elif key_name.startswith('''model/ln''' ): _A = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _A = '''model.blocks.%d.feed_forward.norm.bias''' % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) elif key_name.endswith('''/g''' ): _A = '''model.blocks.%d.feed_forward.norm.weight''' % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) elif key_name.startswith('''model/att''' ): _A = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): _A = vnp.copy() # Compute same dimension as 
Mesh-tensorflow using einsum _A = state[:, 0, :, :] _A = state[:, 1, :, :] _A = state[:, 2, :, :] _A = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player _A = torch.tensor(_lowercase ) _A = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player _A = torch.tensor(_lowercase ) _A = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player _A = torch.tensor(_lowercase ) elif key_name.endswith('''/o/kernel''' ): _A = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player _A = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name.startswith('''model/an''' ): _A = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _A = '''model.blocks.%d.self_attn.norm.bias''' % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) elif key_name.endswith('''/g''' ): _A = '''model.blocks.%d.self_attn.norm.weight''' % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): _A = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] _A = '''model.%s.weight''' % nlayer _A = vnp.copy() # same in embedded _A = torch.tensor(_lowercase ) if key_name.startswith('''model/wte''' ): _A = '''lm_head.weight''' _A = vnp.copy() # same in embedded _A = torch.tensor(_lowercase ) elif key_name.startswith('''model/wob''' ): _A = '''final_logits_bias''' _A = vnp.copy() # same in embedded _A = state.reshape((1, -1) ) _A = torch.tensor(_lowercase ) elif key_name == "model/dense/kernel": _A = '''model.last_project.weight''' _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(_lowercase ) elif key_name == "model/dense_1/bias": _A = '''model.last_project.bias''' _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(_lowercase ) torch.save(_lowercase , args.output ) if __name__ == "__main__": __A = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') __A = parser.parse_args() convert_tf_gptsan_to_pt(args)
62
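Nearly every branch of the converter above performs the same core move: read a tensor out of a TensorFlow checkpoint, transpose 2-D kernels from TF's (in, out) layout into PyTorch's (out, in) layout, and stash the result under a renamed key. A minimal sketch of that pattern, with a hypothetical key-renaming rule standing in for the model-specific mapping:

# Sketch of the TF-checkpoint -> PyTorch state_dict pattern used above.
# `ckpt_dir` and the key-renaming rule are illustrative placeholders.
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_kernels(ckpt_dir: str) -> OrderedDict:
    reader = tf.train.load_checkpoint(ckpt_dir)
    state_dict = OrderedDict()
    for tf_key in reader.get_variable_to_shape_map():
        if tf_key.endswith(("/adam_m", "/adam_v")):
            continue  # skip optimizer slots, as the script above does
        value = reader.get_tensor(tf_key).astype(np.float32)
        if tf_key.endswith("/kernel") and value.ndim == 2:
            # TF Dense kernels are (in_features, out_features);
            # torch.nn.Linear.weight is (out_features, in_features).
            value = value.transpose(1, 0).copy()
        state_dict[tf_key.replace("/", ".")] = torch.tensor(value)
    return state_dict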
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
62
1
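The parser above walks dataclass fields and turns each one into an argparse argument, with special handling for booleans. A stripped-down sketch of that mapping, assuming only plain str/int/float/bool fields (the real class also handles Optional, Literal, Enum, and list types):

# Toy dataclass -> argparse mapping; names here are illustrative.
import dataclasses
from argparse import ArgumentParser


def string_to_bool(v: str) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError(f"not a truthy value: {v}")


def add_dataclass_args(parser: ArgumentParser, dtype) -> None:
    for field in dataclasses.fields(dtype):
        kwargs = {}
        if field.type is bool:
            # mirror the trick above: nargs="?" lets a bare `--flag` mean True
            kwargs.update(
                type=string_to_bool, nargs="?", const=True,
                default=field.default if field.default is not dataclasses.MISSING else False,
            )
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        parser.add_argument(f"--{field.name}", **kwargs)


@dataclasses.dataclass
class TrainArgs:
    lr: float = 3e-4
    epochs: int = 3
    fp16: bool = False


parser = ArgumentParser()
add_dataclass_args(parser, TrainArgs)
args = TrainArgs(**vars(parser.parse_args(["--lr", "0.001", "--fp16"])))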
def __A ( _lowercase = 60_08_51_47_51_43 ):
    '''simple docstring'''
    try:
        _A = int(_lowercase )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    _A = 1
    _A = 2
    while i * i <= n:
        while n % i == 0:
            _A = i
            n //= i
        i += 1
    if n > 1:
        _A = n
    return int(_lowercase )


if __name__ == "__main__":
    print(f'{solution() = }')
62
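The record above strips each prime factor completely before moving to the next candidate, so whatever remains (or was last stripped) is the largest prime factor. A quick de-obfuscated sanity check:

# De-obfuscated form of the trial-division logic above, with test values
# taken from the Project Euler #3 statement.
def largest_prime_factor(n: int) -> int:
    ans, i = 1, 2
    while i * i <= n:
        while n % i == 0:
            ans = i       # remember the factor we just stripped
            n //= i
        i += 1
    return n if n > 1 else ans  # any leftover n > 1 is itself prime


assert largest_prime_factor(13195) == 29            # 13195 = 5 * 7 * 13 * 29
assert largest_prime_factor(600851475143) == 6857   # the Euler #3 answer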
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> 
Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, 
-0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) 
).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
62
1
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __A = trt.Logger(trt.Logger.WARNING) __A = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __A = logging.getLogger(__name__) __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' ) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __A = parser.parse_args() if args.tokenizer_name: __A = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __A = args.per_device_eval_batch_size __A = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __A = True __A = 'temp_engine/bert-fp32.engine' if args.fpaa: __A = 'temp_engine/bert-fp16.engine' if args.inta: __A = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __A = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __A = [network.get_input(i) for i in range(network.num_inputs)] __A = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __A = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __A = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __A = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) _A = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) _A = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowercase ) # start time _A = time.time() # Run inference context.execute_async( bindings=[int(_lowercase ) for d_inp in d_inputs] + [int(_lowercase ), int(_lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase ) cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase ) # Synchronize the stream and take time stream.synchronize() # end time _A = time.time() _A = end_time - start_time _A = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __A = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __A = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __A = raw_datasets['validation'].column_names __A = 'question' if 'question' in column_names else column_names[0] __A = 'context' if 'context' in column_names else column_names[1] __A = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __A = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __A = min(args.max_seq_length, tokenizer.model_max_length) def __A ( _lowercase ): '''simple docstring''' _A = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. _A = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=_lowercase , stride=args.doc_stride , return_overflowing_tokens=_lowercase , return_offsets_mapping=_lowercase , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. _A = tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. _A = [] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
_A = tokenized_examples.sequence_ids(_lowercase ) _A = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. _A = sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. _A = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples __A = raw_datasets['validation'] # Validation Feature Creation __A = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __A = default_data_collator __A = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __A = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase="eval" ): '''simple docstring''' _A = postprocess_qa_predictions( examples=_lowercase , features=_lowercase , predictions=_lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowercase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: _A = [ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: _A = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] _A = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_lowercase , label_ids=_lowercase ) __A = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def __A ( _lowercase ): '''simple docstring''' return trt.volume(engine.get_binding_shape(_lowercase ) ) * engine.get_binding_dtype(_lowercase ).itemsize # Allocate device memory for inputs and outputs. __A = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __A = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __A = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __A = cuda.mem_alloc(h_outputa.nbytes) __A = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
__A = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f' Num examples = {len(eval_dataset)}') logger.info(f' Batch size = {args.per_device_eval_batch_size}') __A = 0.0 __A = 0 __A = timeit.default_timer() __A = None for step, batch in enumerate(eval_dataloader): __A , __A = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __A , __A = outputs __A = torch.tensor(start_logits) __A = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __A = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __A = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __A = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __A = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __A = nested_truncate(all_preds, len(eval_dataset)) __A = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000)) logger.info('Total Number of Inference = %d', niter) __A = post_processing_function(eval_examples, eval_dataset, all_preds) __A = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'Evaluation metrics: {eval_metric}')
62
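The evaluation loop above leans on one pycuda idiom: page-locked host buffers plus asynchronous copies on a CUDA stream, with a single synchronize after inference. A minimal sketch, assuming a TensorRT execution context with one input and one output binding already configured:

# Sketch of the pinned-memory + async-copy pattern in the loop above;
# `context` and the binding layout are assumed to exist.
import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context on import)
import pycuda.driver as cuda


def infer_once(context, h_input: np.ndarray, output_shape):
    # page-locked host memory is what makes the async copies truly async
    h_output = cuda.pagelocked_empty(output_shape, dtype=np.float32)
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output = cuda.mem_alloc(h_output.nbytes)
    stream = cuda.Stream()

    cuda.memcpy_htod_async(d_input, h_input.ravel(), stream)   # host -> device
    context.execute_async(bindings=[int(d_input), int(d_output)],
                          stream_handle=stream.handle)          # enqueue inference
    cuda.memcpy_dtoh_async(h_output, d_output, stream)          # device -> host
    stream.synchronize()  # block until every enqueued op has finished
    return h_output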
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str: _A = question_encoder _A = generator _A = self.question_encoder def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict: if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , '''question_encoder_tokenizer''' ) _A = os.path.join(__A , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__A ) self.generator.save_pretrained(__A ) @classmethod def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any: # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _A = kwargs.pop('''config''' , __A ) if config is None: _A = RagConfig.from_pretrained(__A ) _A = AutoTokenizer.from_pretrained( __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) _A = AutoTokenizer.from_pretrained( __A , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__A , generator=__A ) def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int: return self.current_tokenizer(*__A , **__A ) def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict: return self.generator.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple: return self.generator.decode(*__A , **__A ) def __A ( self: Dict ) -> List[str]: _A = self.question_encoder def __A ( self: Union[str, Any] ) -> int: _A = self.generator def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding: warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __A , ) if max_length is None: _A = self.current_tokenizer.model_max_length _A = self( __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _A = self.current_tokenizer.model_max_length _A = self( text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , ) _A = labels['''input_ids'''] return model_inputs
62
1
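Structurally, the tokenizer above is a delegator: it holds two tokenizers and forwards __call__ to whichever is current. A toy version of the pattern (names illustrative):

# Minimal delegation pattern behind the RAG tokenizer above.
class DualTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def __call__(self, *args, **kwargs):
        # all tokenization requests go through whichever tokenizer is current
        return self.current_tokenizer(*args, **kwargs)

    def use_generator(self):
        self.current_tokenizer = self.generator

    def use_question_encoder(self):
        self.current_tokenizer = self.question_encoder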
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm __A = re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex __A = 10 __A = 256 def __A ( _lowercase ): '''simple docstring''' if len(_lowercase ) < MIN_NUM_TOKENS: return None _A = MinHash(num_perm=_lowercase ) for token in set(_lowercase ): min_hash.update(token.encode() ) return min_hash def __A ( _lowercase ): '''simple docstring''' return {t for t in NON_ALPHA.split(_lowercase ) if len(t.strip() ) > 0} class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: List[str] , *, __A: float = 0.85 , ) -> str: _A = duplication_jaccard_threshold _A = NUM_PERM _A = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _A = defaultdict(__A ) def __A ( self: Union[str, Any] , __A: Tuple , __A: MinHash ) -> None: _A = self._index.query(__A ) if code_key in self._index.keys: print(f"""Duplicate key {code_key}""" ) return self._index.insert(__A , __A ) if len(__A ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__A ) break else: self._duplicate_clusters[close_duplicates[0]].add(__A ) def __A ( self: Optional[int] ) -> List[List[Dict]]: _A = [] for base, duplicates in self._duplicate_clusters.items(): _A = [base] + list(__A ) # reformat the cluster to be a list of dict _A = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(__A ) return duplicate_clusters def __A ( self: Dict , __A: Any ) -> None: _A = self.get_duplicate_clusters() with open(__A , '''w''' ) as f: json.dump(__A , __A ) def __A ( _lowercase ): '''simple docstring''' _A ,_A = element _A = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def __A ( _lowercase ): '''simple docstring''' with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_lowercase , max_queue_size=1_00_00 ) , chunksize=1_00 , ): if data is not None: yield data def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = DuplicationIndex(duplication_jaccard_threshold=_lowercase ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowercase ) ) , max_queue_size=1_00 ) ): di.add(_lowercase , _lowercase ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = get_tokens(_lowercase ) _A = get_tokens(_lowercase ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) __A = None def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = [] for elementa in cluster: _A = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _A = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_lowercase , _lowercase ) >= jaccard_threshold: elementa["copies"] += 1 break else: _A = 1 extremes.append(_lowercase ) return extremes def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' global _shared_dataset _A = dataset _A = [] _A = partial(_find_cluster_extremes_shared , jaccard_threshold=_lowercase ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _lowercase , _lowercase , ) , total=len(_lowercase ) , ): extremes_list.append(_lowercase ) return extremes_list def __A ( _lowercase , _lowercase = 0.85 ): '''simple docstring''' _A = make_duplicate_clusters(_lowercase , _lowercase ) _A = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _A = {} _A = find_extremes(_lowercase , _lowercase , _lowercase ) for extremes in extremes_clusters: for element in extremes: _A = element _A = duplicate_indices - set(extreme_dict.keys() ) _A = dataset.filter(lambda _lowercase , _lowercase : idx not in remove_indices , with_indices=_lowercase ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _A = element['''base_index'''] in extreme_dict if element["is_extreme"]: _A = extreme_dict[element['''base_index''']]['''copies'''] print(f"""Original dataset size: {len(_lowercase )}""" ) print(f"""Number of duplicate clusters: {len(_lowercase )}""" ) print(f"""Files in duplicate cluster: {len(_lowercase )}""" ) print(f"""Unique files in duplicate cluster: {len(_lowercase )}""" ) print(f"""Filtered dataset size: {len(_lowercase )}""" ) return ds_filter, duplicate_clusters
62
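The deduplication pipeline above boils down to: split each document into tokens, hash the token set into a MinHash signature, and use an LSH index to query for near-duplicates before inserting. A self-contained sketch with illustrative documents and thresholds:

# Near-duplicate detection with datasketch, mirroring the pipeline above.
import re

from datasketch import MinHash, MinHashLSH

NON_ALPHA = re.compile(r"[^A-Za-z_0-9]")


def min_hash_of(text: str, num_perm: int = 256) -> MinHash:
    mh = MinHash(num_perm=num_perm)
    for token in {t for t in NON_ALPHA.split(text) if t.strip()}:
        mh.update(token.encode())
    return mh


lsh = MinHashLSH(threshold=0.85, num_perm=256)
docs = {
    "a": "def add(x, y): return x + y",
    "b": "def add(x, y): return y + x",  # same token set as "a"
    "c": "print('hello world')",
}
for key, text in docs.items():
    mh = min_hash_of(text)
    print(key, "near-duplicates:", lsh.query(mh))  # query before inserting
    lsh.insert(key, mh)                            # "b" reports ["a"]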
from __future__ import annotations


def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        _A = (l + r) // 2
        if v[m] >= key:
            _A = m
        else:
            _A = m  # noqa: E741
    return r


def __A ( _lowercase ):
    '''simple docstring'''
    if len(_lowercase ) == 0:
        return 0
    _A = [0] * len(_lowercase )
    _A = 1
    _A = v[0]
    for i in range(1 , len(_lowercase ) ):
        if v[i] < tail[0]:
            _A = v[i]
        elif v[i] > tail[length - 1]:
            _A = v[i]
            length += 1
        else:
            _A = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
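Behind the obfuscated names, the record above is the classic O(n log n) longest-increasing-subsequence algorithm: keep the smallest possible tail for each subsequence length and binary-search for the slot each new element should overwrite. The same idea written with bisect:

# De-obfuscated O(n log n) LIS length; tails[k] is the smallest possible
# tail of an increasing subsequence of length k + 1.
from bisect import bisect_left


def lis_length(v: list) -> int:
    tails = []
    for x in v:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)   # x extends the longest subsequence so far
        else:
            tails[i] = x      # x tightens an existing length-(i+1) tail
    return len(tails)


assert lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6  # e.g. 2,3,7,8,10,13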
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__A = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LiltForQuestionAnswering',
        'LiltForSequenceClassification',
        'LiltForTokenClassification',
        'LiltModel',
        'LiltPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
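The __init__ above defers heavy imports through _LazyModule. The same effect can be sketched directly with PEP 562's module-level __getattr__; module and attribute names here are illustrative:

# Hand-rolled lazy imports via PEP 562, standing in for _LazyModule.
import importlib

_import_structure = {
    "configuration_lilt": ["LiltConfig"],
    "modeling_lilt": ["LiltModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # only import the submodule the first time one of its names is touched
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")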
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": 
out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
1
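The eval aggregation in the record above reduces to one branch on the GLUE output mode: classification tasks take an argmax over the logits, regression tasks squeeze the single logit column.

# Minimal form of the logits -> predictions branch used above.
import numpy as np


def logits_to_preds(logits: np.ndarray, output_mode: str) -> np.ndarray:
    if output_mode == "classification":
        return np.argmax(logits, axis=1)
    if output_mode == "regression":
        return np.squeeze(logits)
    raise ValueError(f"unknown output mode: {output_mode}")


assert logits_to_preds(np.array([[0.1, 0.9], [0.8, 0.2]]), "classification").tolist() == [1, 0]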
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1 , _lowercase , _lowercase , _lowercase )
        move_disk(_lowercase , _lowercase )
        move_tower(height - 1 , _lowercase , _lowercase , _lowercase )


def __A ( _lowercase , _lowercase ):
    '''simple docstring'''
    print('''moving disk from''' , _lowercase , '''to''' , _lowercase )


def __A ( ):
    '''simple docstring'''
    _A = int(input('''Height of hanoi: ''' ).strip() )
    move_tower(_lowercase , '''A''' , '''B''' , '''C''' )


if __name__ == "__main__":
    main()
62
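Replacing the prints in the record above with a counter makes the recursion's cost explicit: moving an n-disk tower takes exactly 2**n - 1 disk moves (two recursive subtowers plus one move of the largest disk).

# Move count for Towers of Hanoi, mirroring the recursion above.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return count_moves(height - 1) + 1 + count_moves(height - 1)


assert [count_moves(n) for n in range(1, 6)] == [1, 3, 7, 15, 31]  # 2**n - 1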
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup  # the original's "bsa" would fail to import; BeautifulSoup lives in bs4


def __A ( _lowercase = "" ):
    '''simple docstring'''
    _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' )
    _A = soup.find_all('''td''' , attrs='''titleColumn''' )
    _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(_lowercase , _lowercase )
    }


def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ):
    '''simple docstring'''
    _A = get_imdb_top_aaa_movies()
    with open(_lowercase , '''w''' , newline='''''' ) as out_file:
        _A = csv.writer(_lowercase )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    write_movies()
62
1
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
    """simple docstring"""

    A_ = LayoutLMTokenizer
    A_ = LayoutLMTokenizerFast
    A_ = True
    A_ = True

    def __A ( self: Any ) -> List[str]:
        super().setUp()
        _A = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __A ( self: str , **__A: Any ) -> Optional[Any]:
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__A )

    def __A ( self: Dict , __A: Optional[int] ) -> List[str]:
        _A = '''UNwant\u00E9d,running'''
        _A = '''unwanted, running'''
        return input_text, output_text

    def __A ( self: Tuple ) -> Optional[Any]:
        _A = self.tokenizer_class(self.vocab_file )
        _A = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(__A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 10, 8, 9] )

    def __A ( self: Dict ) -> List[Any]:
        pass
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __A = logging.get_logger(__name__) __A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} __A = { 'vocab_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json' }, 'merges_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt' }, } __A = {'allegro/herbert-base-cased': 514} __A = {} class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_INIT_CONFIGURATION A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = HerbertTokenizer def __init__( self: List[str] , __A: Dict=None , __A: List[Any]=None , __A: Dict=None , __A: Tuple="<s>" , __A: Optional[int]="<unk>" , __A: Dict="<pad>" , __A: Optional[Any]="<mask>" , __A: Optional[Any]="</s>" , **__A: Optional[int] , ) -> str: super().__init__( __A , __A , tokenizer_file=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , sep_token=__A , **__A , ) def __A ( self: List[Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.cls_token_id] _A = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self: Optional[Any] , __A: List[int] , __A: Optional[List[int]] = None , __A: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1] def __A ( self: Union[str, Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self: str , __A: str , __A: Optional[str] = None ) -> Tuple[str]: _A = self._tokenizer.model.save(__A , name=__A ) return tuple(__A )
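The three helpers above implement the standard single/pair layout: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`, with segment id 0 for the first sentence and 1 for the second. A plain-Python sketch with assumed ids cls=0, sep=1:

CLS, SEP = [0], [1]  # assumed cls_token_id / sep_token_id

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return CLS + ids_a + SEP
    return CLS + ids_a + SEP + ids_b + SEP

def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return len(CLS + ids_a + SEP) * [0]
    return len(CLS + ids_a + SEP) * [0] + len(ids_b + SEP) * [1]

assert build_inputs([7, 8], [9]) == [0, 7, 8, 1, 9, 1]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]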
62
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
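The ONNX config at the end maps each model input to its dynamic axes: batch and sequence vary for ordinary tasks, and multiple-choice inserts a choice axis between them. A standalone sketch of that mapping:

from collections import OrderedDict

def onnx_inputs(task: str) -> "OrderedDict[str, dict[int, str]]":
    # Mirrors the property above: axis index -> symbolic dimension name.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

assert list(onnx_inputs("default")["input_ids"].values()) == ["batch", "sequence"]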
62
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available __A = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
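`_LazyModule` defers the costly modeling import until an attribute is actually touched. A minimal sketch of the same idea using PEP 562's module-level `__getattr__` (illustrative only, not the real `_LazyModule`; the stdlib `math`/`sqrt` stand in for the submodule and symbol):

# lazy_pkg/__init__.py  (hypothetical file name)
import importlib
from typing import Any

_import_structure = {"math": ["sqrt"]}  # attribute name -> owning module
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str) -> Any:
    # Runs only when `name` is not found normally; imports on first access.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")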
62
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export finished''' )
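The export above leans on `dynamic_axes`: marking dim 0 as `batch_size` and dim 1 as `seq_len` on every tensor lets a single ONNX graph serve any batch and sequence length. A minimal sketch with a toy module in place of the quantized QA model:

import torch

class Toy(torch.nn.Module):
    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        return input_ids.float() * 2

axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
    Toy(),
    (torch.zeros(1, 8, dtype=torch.long),),  # example input fixes rank, not shape
    "toy.onnx",
    input_names=["input_ids"],
    output_names=["doubled"],
    dynamic_axes={"input_ids": axes, "doubled": axes},
    opset_version=13,
)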
62
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: str ) -> Optional[int]: _A = tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 
27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) _A = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above _A = tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above _A = tf_top_k_top_p_filtering(__A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) _A = output[output != -float('''inf''' )] _A = tf.cast( tf.where(tf.not_equal(__A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__A , __A , rtol=1e-12 ) tf.debugging.assert_equal(__A , __A ) @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase , snake_case ): """simple docstring""" if is_tf_available(): A_ = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq, "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM, "AutoModelForVision2Seq": TFAutoModelForVisionaSeq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def __A ( self: int ) -> List[Any]: # TF-only test: tf.saved_model export _A = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _A = 2 _A = 2 class SCREAMING_SNAKE_CASE ( tf.Module ): """simple docstring""" def __init__( self: Tuple , __A: Optional[int] ) -> Dict: super(__A , self ).__init__() _A = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=__A , ) def __A ( self: Optional[Any] , __A: str , __A: int ) -> Optional[int]: _A = self.model.generate( input_ids=__A , attention_mask=__A , max_new_tokens=__A , return_dict_in_generate=__A , ) return {"sequences": outputs["sequences"]} _A = [[2, 0], [1_02, 1_03]] _A = [[1, 0], [1, 1]] _A = DummyModel(model=__A ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__A , __A , signatures={'''serving_default''': dummy_model.serving} ) _A = tf.saved_model.load(__A ).signatures['''serving_default'''] for batch_size in range(1 , len(__A ) + 1 ): _A = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } _A = serving_func(**__A )['''sequences'''] _A = test_model.generate(**__A , max_new_tokens=__A ) tf.debugging.assert_equal(__A , __A ) @slow def __A ( self: List[Any] ) -> List[Any]: # TF-only test: tf.saved_model export _A = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _A = 1 _A = 2 class SCREAMING_SNAKE_CASE ( tf.Module ): """simple docstring""" def __init__( self: Tuple , __A: Union[str, Any] ) -> str: super(__A , self ).__init__() _A = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=__A , ) def __A ( self: int , __A: Dict , __A: Any ) -> str: _A = self.model.generate( input_ids=__A , attention_mask=__A , max_new_tokens=__A , return_dict_in_generate=__A , ) return {"sequences": outputs["sequences"]} _A = [[2], [1_02, 1_03]] _A = 
[[1], [1, 1]] _A = DummyModel(model=__A ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__A , __A , signatures={'''serving_default''': dummy_model.serving} ) _A = tf.saved_model.load(__A ).signatures['''serving_default'''] for input_row in range(len(__A ) ): _A = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } _A = serving_func(**__A )['''sequences'''] _A = test_model.generate(**__A , max_new_tokens=__A ) tf.debugging.assert_equal(__A , __A ) @slow @require_tensorflow_text def __A ( self: Optional[Any] ) -> int: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=__A ) class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self: int ) -> int: super().__init__() _A = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__A , '''spiece.model''' ) , '''rb''' ).read() ) _A = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def __A ( self: Dict , __A: Dict , *__A: Any , **__A: int ) -> Any: _A = self.tokenizer.tokenize(__A ) _A ,_A = text.pad_model_inputs( __A , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) _A = self.model.generate(input_ids=__A , attention_mask=__A ) return self.tokenizer.detokenize(__A ) _A = CompleteSentenceTransformer() _A = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) _A = complete_model(__A ) _A = tf.keras.Model(__A , __A ) keras_model.save(__A ) def __A ( self: Dict ) -> List[Any]: # Has PT equivalent: this test relies on random sampling _A = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } _A = 14 _A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _A = '''Hello, my dog is cute and''' _A = tokenizer(__A , return_tensors='''tf''' ) _A = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _A = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) _A = model.generate(**__A , eos_token_id=__A , **__A ) self.assertTrue(expectation == len(generated_tokens[0] ) ) _A = [6_38, 1_98] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) _A = model.generate(**__A , eos_token_id=__A , **__A ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def __A ( self: str ) -> str: # Has PT equivalent: ample use of framework-specific code _A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) _A = '''Hugging Face is a technology company based in New York and Paris.''' _A = bart_tokenizer(__A , return_tensors='''tf''' ).input_ids _A = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) _A = bart_model.generate(__A ).numpy() class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Any=None , **__A: Optional[int] ) -> Optional[Any]: return super().call(__A , **__A ) _A = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) _A = bart_model.generate(__A , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(__A , __A ) ) class SCREAMING_SNAKE_CASE ( bart_model.model.encoder.__class__ ): """simple docstring""" def __A ( self: List[Any] , __A: 
Optional[int] , **__A: Union[str, Any] ) -> List[str]: return super().call(__A , **__A ) _A = FakeEncoder(bart_model.config , bart_model.model.shared ) _A = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) _A = bart_model.generate(__A ).numpy() with self.assertRaises(__A ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__A , foo='''bar''' )
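The recurring pattern in these tests — wrap the model in a `tf.Module`, pin an `input_signature` on a `tf.function`, then save and reload the `serving_default` signature — works for any computation. A stripped-down sketch with a trivial op in place of `generate`:

import tempfile

import tensorflow as tf

class Doubler(tf.Module):
    @tf.function(input_signature=(tf.TensorSpec((None, None), tf.int32, name="x"),))
    def serving(self, x):
        return {"doubled": x * 2}

module = Doubler()
with tempfile.TemporaryDirectory() as tmp:
    tf.saved_model.save(module, tmp, signatures={"serving_default": module.serving})
    restored = tf.saved_model.load(tmp).signatures["serving_default"]
    out = restored(x=tf.constant([[1, 2], [3, 4]], dtype=tf.int32))["doubled"]
    tf.debugging.assert_equal(out, tf.constant([[2, 4], [6, 8]]))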
62
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
1
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = FunnelTokenizer A_ = FunnelTokenizerFast A_ = True A_ = True def __A ( self: Tuple ) -> int: super().setUp() _A = [ '''<unk>''', '''<cls>''', '''<sep>''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __A ( self: List[str] , **__A: Optional[Any] ) -> int: return FunnelTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: List[Any] , **__A: List[Any] ) -> str: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__A ) def __A ( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: _A = '''UNwant\u00E9d,running''' _A = '''unwanted, running''' return input_text, output_text def __A ( self: Dict ) -> Optional[int]: _A = self.tokenizer_class(self.vocab_file ) _A = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 10, 8, 9] ) def __A ( self: str ) -> Dict: _A = self.get_tokenizers(do_lower_case=__A ) for tokenizer in tokenizers: _A = tokenizer('''UNwant\u00E9d,running''' ) _A = len(inputs['''input_ids'''] ) - 1 self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len ) _A = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' ) self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, split doubled letters with X, pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
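A round-trip demo for the fixed cipher above; note that decoding recovers the prepared plaintext (uppercased, doubled letters split, padded to even length with X), not the raw input:

key = "MONARCHY"
secret = encode("HIDETHEGOLD", key)
assert decode(secret, key) == prepare_input("HIDETHEGOLD")  # "HIDETHEGOLDX"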
62
1
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: str , __A: Any , __A: Union[str, Any]=13 , __A: Union[str, Any]=30 , __A: int=2 , __A: Optional[Any]=3 , __A: Any=True , __A: Any=True , __A: Any=32 , __A: Tuple=5 , __A: int=4 , __A: Optional[Any]=37 , __A: Optional[int]="gelu" , __A: str=0.1 , __A: Any=0.1 , __A: Optional[int]=10 , __A: Any=0.02 , __A: Any=3 , __A: Union[str, Any]=None , __A: int=2 , ) -> str: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = scope _A = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _A = (image_size // patch_size) ** 2 _A = num_patches + 2 def __A ( self: Dict ) -> Optional[Any]: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def __A ( self: Optional[int] ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __A ( self: int , __A: Union[str, Any] , __A: int , __A: Any ) -> List[Any]: _A = DeiTModel(config=__A ) model.to(__A ) model.eval() _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self: Optional[Any] , __A: Any , __A: int , __A: List[str] ) -> Any: _A = DeiTForMaskedImageModeling(config=__A ) model.to(__A ) model.eval() _A = model(__A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _A = 1 _A = DeiTForMaskedImageModeling(__A ) model.to(__A ) model.eval() _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A 
= model(__A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __A ( self: Dict , __A: Optional[int] , __A: Dict , __A: str ) -> Union[str, Any]: _A = self.type_sequence_label_size _A = DeiTForImageClassification(__A ) model.to(__A ) model.eval() _A = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _A = 1 _A = DeiTForImageClassification(__A ) model.to(__A ) model.eval() _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self: str ) -> str: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) A_ = False A_ = False A_ = False def __A ( self: List[Any] ) -> Optional[int]: _A = DeiTModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def __A ( self: Tuple ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __A ( self: Optional[int] ) -> Union[str, Any]: pass def __A ( self: Tuple ) -> str: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def __A ( self: List[str] ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) def __A ( self: Dict ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: Optional[int] ) -> Tuple: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__A ) def __A ( self: Dict ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __A ( self: Tuple , __A: Optional[Any] , __A: Optional[int] , __A: Tuple=False ) -> List[Any]: _A = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __A ( self: Tuple ) -> Optional[int]: if not self.model_tester.is_training: return _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__A ) or model_class.__name__ == 
"DeiTForImageClassificationWithTeacher" ): continue _A = model_class(__A ) model.to(__A ) model.train() _A = self._prepare_for_class(__A , __A , return_labels=__A ) _A = model(**__A ).loss loss.backward() def __A ( self: Optional[Any] ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _A = False _A = True for model_class in self.all_model_classes: if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue _A = model_class(__A ) model.gradient_checkpointing_enable() model.to(__A ) model.train() _A = self._prepare_for_class(__A , __A , return_labels=__A ) _A = model(**__A ).loss loss.backward() def __A ( self: List[Any] ) -> str: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__A ), *get_values(__A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ): _A = problem_type['''title'''] _A = problem_type['''num_labels'''] _A = model_class(__A ) model.to(__A ) model.train() _A = self._prepare_for_class(__A , __A , return_labels=__A ) if problem_type["num_labels"] > 1: _A = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) _A = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__A ) as warning_list: _A = model(**__A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __A ( self: Optional[Any] ) -> Union[str, Any]: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DeiTModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Any ) -> Optional[int]: return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __A ( self: Dict ) -> Any: _A = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( __A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): _A = model(**__A ) # verify the logits _A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __A ) _A = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __A ( self: Optional[int] ) -> List[Any]: _A = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ) _A = inputs.pixel_values.to(__A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _A = model(__A )
62
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
62
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "altclip_text_model" def __init__( self: List[str] , __A: Union[str, Any]=25_00_02 , __A: List[str]=10_24 , __A: int=24 , __A: int=16 , __A: Dict=40_96 , __A: List[Any]="gelu" , __A: List[str]=0.1 , __A: List[str]=0.1 , __A: int=5_14 , __A: Optional[Any]=1 , __A: int=0.02 , __A: Tuple=0.02 , __A: Optional[int]=1e-05 , __A: Any=1 , __A: Optional[int]=0 , __A: Union[str, Any]=2 , __A: Dict="absolute" , __A: Any=True , __A: Tuple=7_68 , **__A: Optional[int] , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = initializer_factor _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = project_dim class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "altclip_vision_model" def __init__( self: Any , __A: Union[str, Any]=7_68 , __A: List[Any]=30_72 , __A: str=5_12 , __A: Optional[int]=12 , __A: Optional[int]=12 , __A: Optional[Any]=3 , __A: Tuple=2_24 , __A: Dict=32 , __A: Tuple="quick_gelu" , __A: List[str]=1e-5 , __A: Tuple=0.0 , __A: Tuple=0.02 , __A: str=1.0 , **__A: str , ) -> int: super().__init__(**__A ) _A = hidden_size _A = intermediate_size _A = projection_dim _A = num_hidden_layers _A = num_attention_heads _A = num_channels _A = patch_size _A = image_size _A = initializer_range _A = initializer_factor _A = attention_dropout _A = layer_norm_eps _A = hidden_act @classmethod def __A ( cls: List[str] , __A: Union[str, os.PathLike] , **__A: Optional[int] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) _A ,_A = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": _A = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "altclip" A_ = True def __init__( self: Optional[int] , __A: Tuple=None , __A: Union[str, Any]=None , __A: Dict=7_68 , __A: Optional[Any]=2.6_592 , **__A: Dict ) -> Optional[int]: # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). _A = kwargs.pop('''text_config_dict''' , __A ) _A = kwargs.pop('''vision_config_dict''' , __A ) super().__init__(**__A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: _A = {} # This is the complete result when using `text_config_dict`. _A = AltCLIPTextConfig(**__A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: _A = ( f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ f"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: _A = ( f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ f"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(__A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: _A = {} # This is the complete result when using `vision_config_dict`. _A = AltCLIPVisionConfig(**__A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _A = { str(__A ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: _A = ( f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: _A = ( f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """ f"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(__A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: _A = {} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: _A = {} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' ) _A = AltCLIPTextConfig(**__A ) _A = AltCLIPVisionConfig(**__A ) _A = projection_dim _A = logit_scale_init_value _A = 1.0 @classmethod def __A ( cls: str , __A: AltCLIPTextConfig , __A: AltCLIPVisionConfig , **__A: Dict ) -> str: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def __A ( self: str ) -> Dict: _A = copy.deepcopy(self.__dict__ ) _A = self.text_config.to_dict() _A = self.vision_config.to_dict() _A = self.__class__.model_type return output
62
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """Order the vertices reachable from `vert` by DFS finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    """Collect every vertex in the strongly connected component containing `vert`."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    """Kosaraju's algorithm: DFS on `graph` for order, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
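Running the fixed Kosaraju implementation above on the two sample graphs gives the expected components (vertex order within each component depends on DFS traversal order):

print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]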
62
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A = logging.get_logger(__name__) __A = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "deta" A_ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: int , __A: List[str]=None , __A: Optional[int]=9_00 , __A: Dict=20_48 , __A: Union[str, Any]=6 , __A: Union[str, Any]=20_48 , __A: Tuple=8 , __A: Optional[int]=6 , __A: str=10_24 , __A: Optional[Any]=8 , __A: Union[str, Any]=0.0 , __A: Optional[Any]=True , __A: List[Any]="relu" , __A: List[str]=2_56 , __A: Any=0.1 , __A: str=0.0 , __A: List[str]=0.0 , __A: List[Any]=0.02 , __A: List[str]=1.0 , __A: Any=True , __A: Dict=False , __A: List[str]="sine" , __A: Optional[Any]=5 , __A: Union[str, Any]=4 , __A: List[str]=4 , __A: Optional[Any]=True , __A: List[str]=3_00 , __A: List[Any]=True , __A: str=True , __A: Optional[Any]=1 , __A: Dict=5 , __A: int=2 , __A: List[Any]=1 , __A: Any=1 , __A: Tuple=5 , __A: List[Any]=2 , __A: Any=0.1 , __A: Any=0.25 , **__A: int , ) -> Any: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) _A = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(__A , __A ): _A = backbone_config.pop('''model_type''' ) _A = CONFIG_MAPPING[backbone_model_type] _A = config_class.from_dict(__A ) _A = backbone_config _A = num_queries _A = max_position_embeddings _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = init_xavier_std _A = encoder_layerdrop _A = auxiliary_loss _A = position_embedding_type # deformable attributes _A = num_feature_levels _A = encoder_n_points _A = decoder_n_points _A = two_stage _A = two_stage_num_proposals _A = with_box_refine _A = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _A = class_cost _A = bbox_cost _A = giou_cost # Loss coefficients _A = mask_loss_coefficient _A = dice_loss_coefficient _A = bbox_loss_coefficient _A = giou_loss_coefficient _A = eos_coefficient _A = focal_alpha super().__init__(is_encoder_decoder=__A , **__A ) @property def __A ( self: List[Any] ) -> int: return self.encoder_attention_heads @property def __A ( self: Optional[Any] ) -> int: return self.d_model def __A ( self: Union[str, Any] ) -> Optional[int]: _A = copy.deepcopy(self.__dict__ ) _A = self.backbone_config.to_dict() _A = self.__class__.model_type return output
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: _A = mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) else: _A = max( mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) , mf_knapsack(i - 1 , _lowercase , _lowercase , j - wt[i - 1] ) + val[i - 1] , ) _A = val return f[i][j] def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: _A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: _A = dp[i - 1][w_] return dp[n][w_], dp def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not (isinstance(_lowercase , (list, tuple) ) and isinstance(_lowercase , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) _A = len(_lowercase ) if num_items != len(_lowercase ): _A = ( '''The number of weights must be the same as the number of values.\n''' f"""But got {num_items} weights and {len(_lowercase )} values""" ) raise ValueError(_lowercase ) for i in range(_lowercase ): if not isinstance(wt[i] , _lowercase ): _A = ( '''All weights must be integers but got weight of ''' f"""type {type(wt[i] )} at index {i}""" ) raise TypeError(_lowercase ) _A ,_A = knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) _A = set() _construct_solution(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) return optimal_val, example_optional_set def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_lowercase , _lowercase , i - 1 , _lowercase , _lowercase ) else: optimal_set.add(_lowercase ) _construct_solution(_lowercase , _lowercase , i - 1 , j - wt[i - 1] , _lowercase ) if __name__ == "__main__": __A = [3, 2, 4, 4] __A = [4, 3, 2, 3] __A = 4 __A = 6 __A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __A , __A = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __A , __A = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
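The block above solves 0/1 knapsack both top-down with a memo table and bottom-up, then backtracks through the DP table to recover one optimal item subset. A compact sketch of the bottom-up variant with reconstruction; function and variable names are illustrative.

def knapsack_01(capacity, weights, values):
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            dp[i][w] = dp[i - 1][w]  # skip item i
            if weights[i - 1] <= w:  # or take it, if it fits
                dp[i][w] = max(dp[i][w], values[i - 1] + dp[i - 1][w - weights[i - 1]])
    chosen, w = set(), capacity
    for i in range(n, 0, -1):  # a cell that changed means item i was taken
        if dp[i][w] != dp[i - 1][w]:
            chosen.add(i)
            w -= weights[i - 1]
    return dp[n][capacity], chosen

# mirrors the example at the bottom of the block above (items are 1-indexed):
assert knapsack_01(6, [4, 3, 2, 3], [3, 2, 4, 4]) == (8, {3, 4})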
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __A = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) __A = logging.get_logger(__name__) # pylint: disable=invalid-name def __A ( ): '''simple docstring''' _A = '''https://pypi.org/pypi/diffusers/json''' _A = json.loads(request.urlopen(_lowercase ).read() )['''releases'''].keys() return sorted(_lowercase , key=lambda _lowercase : version.Version(_lowercase ) ) def __A ( ): '''simple docstring''' if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowercase ) os.makedirs(_lowercase , exist_ok=_lowercase ) _A = Path(_lowercase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def __A ( _lowercase ): '''simple docstring''' init_hf_modules() _A = Path(_lowercase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowercase , exist_ok=_lowercase ) _A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def __A ( _lowercase ): '''simple docstring''' with open(_lowercase , '''r''' , encoding='''utf-8''' ) as f: _A = f.read() # Imports of the form `import .xxx` _A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , _lowercase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , _lowercase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowercase ) ) def __A ( _lowercase ): '''simple docstring''' _A = False _A = [module_file] _A = [] # Let's recurse through all relative imports while not no_change: _A = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowercase ) ) _A = Path(_lowercase ).parent _A = [str(module_path / m ) for m in new_imports] _A = [f for f in new_import_files if f not in all_relative_imports] _A = [f"""{f}.py""" for f in new_import_files] _A = len(_lowercase ) == 0 all_relative_imports.extend(_lowercase ) return all_relative_imports def __A ( _lowercase ): '''simple docstring''' with open(_lowercase , '''r''' , encoding='''utf-8''' ) as f: _A = f.read() # Imports of the form `import xxx` _A = re.findall('''^\s*import\s+(\S+)\s*$''' , _lowercase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , _lowercase , flags=re.MULTILINE ) # Only keep the top-level module _A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all _A = list(set(_lowercase ) ) _A = [] for imp in imports: try: importlib.import_module(_lowercase ) except ImportError: missing_packages.append(_lowercase ) if len(_lowercase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f"""{", ".join(_lowercase )}. 
Run `pip install {" ".join(_lowercase )}`""" ) return get_relative_imports(_lowercase ) def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = module_path.replace(os.path.sep , '''.''' ) _A = importlib.import_module(_lowercase ) if class_name is None: return find_pipeline_class(_lowercase ) return getattr(_lowercase , _lowercase ) def __A ( _lowercase ): '''simple docstring''' from ..pipelines import DiffusionPipeline _A = dict(inspect.getmembers(_lowercase , inspect.isclass ) ) _A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowercase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) _A = cls return pipeline_class def __A ( _lowercase , _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , ): '''simple docstring''' _A = str(_lowercase ) _A = os.path.join(_lowercase , _lowercase ) if os.path.isfile(_lowercase ): _A = module_file_or_url _A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: _A = get_diffusers_versions() # cut ".dev0" _A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: _A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: _A = f"""v{revision}""" elif revision == "main": _A = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub _A = COMMUNITY_PIPELINES_URL.format(revision=_lowercase , pipeline=_lowercase ) try: _A = cached_download( _lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , use_auth_token=_lowercase , ) _A = '''git''' _A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached _A = hf_hub_download( _lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , use_auth_token=_lowercase , ) _A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment _A = check_imports(_lowercase ) # Now we move the module inside our cached dynamic modules. _A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowercase ) _A = Path(_lowercase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. 
shutil.copy(_lowercase , submodule_path / module_file ) for module_needed in modules_needed: _A = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowercase , _lowercase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowercase , _lowercase ): _A = use_auth_token elif use_auth_token is True: _A = HfFolder.get_token() else: _A = None _A = model_info(_lowercase , revision=_lowercase , token=_lowercase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. _A = submodule_path / commit_hash _A = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowercase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowercase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowercase , f"""{module_needed}.py""" , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , ) return os.path.join(_lowercase , _lowercase ) def __A ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ): '''simple docstring''' _A = get_cached_module_file( _lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , ) return get_class_in_module(_lowercase , final_module.replace('''.py''' , '''''' ) )
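The loader above discovers a pipeline file's relative imports with two regular expressions so the sibling modules can be fetched and cached alongside it. A minimal sketch of that scan; note it uses raw-string patterns, which avoid the invalid-escape warnings the non-raw patterns above would trigger on recent Pythons.

import re

def find_relative_imports(source):
    # `import .xxx` and `from .xxx import yyy`, one module name per match
    matches = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
    matches += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
    return sorted(set(matches))

assert find_relative_imports("from .utils import helper\nimport .ops\n") == ["ops", "utils"]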
def __A ( _lowercase = 1_00_00_00 ): '''simple docstring''' _A = 1 _A = 1 _A = {1: 1} for inputa in range(2 , _lowercase ): _A = 0 _A = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: _A = (3 * number) + 1 counter += 1 if inputa not in counters: _A = counter if counter > pre_counter: _A = inputa _A = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
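The Project Euler 14 solution above caches every Collatz chain length it has already computed so each starting number is walked at most once. A minimal memoized helper in the same spirit; the mutable default argument is used deliberately as a shared cache.

def collatz_length(n, cache={1: 1}):
    if n not in cache:
        nxt = n // 2 if n % 2 == 0 else 3 * n + 1
        cache[n] = 1 + collatz_length(nxt)
    return cache[n]

assert collatz_length(13) == 10  # 13, 40, 20, 10, 5, 16, 8, 4, 2, 1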
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> 
Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, 
-0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) 
).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
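The routine above implements full text justification: words are packed greedily, leftover spaces are distributed round-robin starting from the leftmost gaps, and the final line is left-justified. A sketch of just the space-distribution step; names are illustrative.

def justify_line(line, width, max_width):
    if len(line) == 1:  # a single word is padded on the right
        return line[0] + " " * (max_width - width)
    gaps = len(line) - 1
    base, extra = divmod(max_width - width, gaps)
    parts = []
    for i, word in enumerate(line[:-1]):
        parts.append(word)
        parts.append(" " * (base + (1 if i < extra else 0)))  # leftmost gaps get the remainder
    parts.append(line[-1])
    return "".join(parts)

assert justify_line(["This", "is", "an"], width=8, max_width=16) == "This    is    an"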
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __A = get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Dict , __A: Optional[str] = None ) -> List[Any]: _A = ( os.path.join(__A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) _A = Extractor def __A ( self: Union[str, Any] , __A: str ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" _A = os.path.abspath(__A ) return os.path.join(self.extract_dir , hash_url_to_filename(__A ) ) def __A ( self: str , __A: str , __A: bool ) -> bool: return force_extract or ( not os.path.isfile(__A ) and not (os.path.isdir(__A ) and os.listdir(__A )) ) def __A ( self: List[str] , __A: str , __A: bool = False ) -> str: _A = self.extractor.infer_extractor_format(__A ) if not extractor_format: return input_path _A = self._get_output_path(__A ) if self._do_extract(__A , __A ): self.extractor.extract(__A , __A , __A ) return output_path class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @classmethod @abstractmethod def __A ( cls: str , __A: Union[Path, str] , **__A: Any ) -> bool: ... @staticmethod @abstractmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: ... class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = [] @staticmethod def __A ( __A: Union[Path, str] , __A: int ) -> List[Any]: with open(__A , '''rb''' ) as f: return f.read(__A ) @classmethod def __A ( cls: Any , __A: Union[Path, str] , __A: bytes = b"" ) -> bool: if not magic_number: _A = max(len(__A ) for cls_magic_number in cls.magic_numbers ) try: _A = cls.read_magic_number(__A , __A ) except OSError: return False return any(magic_number.startswith(__A ) for cls_magic_number in cls.magic_numbers ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @classmethod def __A ( cls: Union[str, Any] , __A: Union[Path, str] , **__A: str ) -> bool: return tarfile.is_tarfile(__A ) @staticmethod def __A ( __A: Optional[Any] , __A: Any ) -> str: def resolved(__A: str ) -> str: return os.path.realpath(os.path.abspath(__A ) ) def badpath(__A: str , __A: str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(__A , __A ) ).startswith(__A ) def badlink(__A: Optional[int] , __A: str ) -> bool: # Links are interpreted relative to the directory containing the link _A = resolved(os.path.join(__A , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=__A ) _A = resolved(__A ) for finfo in members: if badpath(finfo.name , __A ): logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" ) elif finfo.issym() and badlink(__A , __A ): logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" ) elif finfo.islnk() and badlink(__A , __A ): logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" ) else: yield finfo @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: os.makedirs(__A , exist_ok=__A ) _A = tarfile.open(__A ) tar_file.extractall(__A , members=TarExtractor.safemembers(__A , __A ) ) tar_file.close() 
class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"\x1F\x8B"] @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: with gzip.open(__A , '''rb''' ) as gzip_file: with open(__A , '''wb''' ) as extracted_file: shutil.copyfileobj(__A , __A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [ B"PK\x03\x04", B"PK\x05\x06", # empty archive B"PK\x07\x08", # spanned archive ] @classmethod def __A ( cls: List[str] , __A: Union[Path, str] , __A: bytes = b"" ) -> bool: if super().is_extractable(__A , magic_number=__A ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(__A , '''rb''' ) as fp: _A = _EndRecData(__A ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: _A = fp.read(__A ) # CD is where we expect it to be if len(__A ) == sizeCentralDir: _A = struct.unpack(__A , __A ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: os.makedirs(__A , exist_ok=__A ) with zipfile.ZipFile(__A , '''r''' ) as zip_file: zip_file.extractall(__A ) zip_file.close() class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"\xFD\x37\x7A\x58\x5A\x00"] @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: with lzma.open(__A ) as compressed_file: with open(__A , '''wb''' ) as extracted_file: shutil.copyfileobj(__A , __A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"Rar!\x1a\x07\x00", B"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''' ) import rarfile os.makedirs(__A , exist_ok=__A ) _A = rarfile.RarFile(__A ) rf.extractall(__A ) rf.close() class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"\x28\xb5\x2F\xFD"] @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install zstandard''' ) import zstandard as zstd _A = zstd.ZstdDecompressor() with open(__A , '''rb''' ) as ifh, open(__A , '''wb''' ) as ofh: dctx.copy_stream(__A , __A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"\x42\x5A\x68"] @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: with bza.open(__A , '''rb''' ) as compressed_file: with open(__A , '''wb''' ) as extracted_file: shutil.copyfileobj(__A , __A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"\x37\x7A\xBC\xAF\x27\x1C"] @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: if 
not config.PY7ZR_AVAILABLE: raise ImportError('''Please pip install py7zr''' ) import pyazr os.makedirs(__A , exist_ok=__A ) with pyazr.SevenZipFile(__A , '''r''' ) as archive: archive.extractall(__A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [B"\x04\x22\x4D\x18"] @staticmethod def __A ( __A: Union[Path, str] , __A: Union[Path, str] ) -> None: if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''' ) import lza.frame with lza.frame.open(__A , '''rb''' ) as compressed_file: with open(__A , '''wb''' ) as extracted_file: shutil.copyfileobj(__A , __A ) class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def __A ( cls: Any ) -> List[Any]: return max( len(__A ) for extractor in cls.extractors.values() if issubclass(__A , __A ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def __A ( __A: Union[Path, str] , __A: int ) -> Tuple: try: return MagicNumberBaseExtractor.read_magic_number(__A , magic_number_length=__A ) except OSError: return b"" @classmethod def __A ( cls: int , __A: Union[Path, str] , __A: bool = False ) -> bool: warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'infer_extractor_format\' instead.''' , category=__A , ) _A = cls.infer_extractor_format(__A ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def __A ( cls: Any , __A: Union[Path, str] ) -> str: # <Added version="2.4.0"/> _A = cls._get_magic_number_max_length() _A = cls._read_magic_number(__A , __A ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(__A , magic_number=__A ): return extractor_format @classmethod def __A ( cls: Any , __A: Union[Path, str] , __A: Union[Path, str] , __A: Optional[str] = None , __A: Optional[BaseExtractor] = "deprecated" , ) -> None: os.makedirs(os.path.dirname(__A ) , exist_ok=__A ) # Prevent parallel extractions _A = str(Path(__A ).with_suffix('''.lock''' ) ) with FileLock(__A ): shutil.rmtree(__A , ignore_errors=__A ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(__A , __A ): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'extractor_format\' instead.''' , category=__A , ) _A = extractor if extractor != '''deprecated''' else extractor_format else: _A = cls.extractors[extractor_format] return extractor.extract(__A , __A ) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' , category=__A , ) for extractor in cls.extractors.values(): if extractor.is_extractable(__A ): return extractor.extract(__A , __A )
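The registry above infers an archive's format by comparing the file's leading bytes against each extractor's magic numbers, reading only as many bytes as the longest signature requires. A small standalone sketch of that sniffing logic, repeating a few of the same signatures.

from typing import Optional

MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xfd\x37\x7a\x58\x5a\x00"],
    "bz2": [b"\x42\x5a\x68"],
    "zstd": [b"\x28\xb5\x2f\xfd"],
}

def infer_format(path: str) -> Optional[str]:
    longest = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        head = f.read(longest)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None  # unknown format: leave the file as-is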
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with 
pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
from __future__ import annotations def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): # noqa: E741 '''simple docstring''' while r - l > 1: _A = (l + r) // 2 if v[m] >= key: _A = m else: _A = m # noqa: E741 return r def __A ( _lowercase ): '''simple docstring''' if len(_lowercase ) == 0: return 0 _A = [0] * len(_lowercase ) _A = 1 _A = v[0] for i in range(1 , len(_lowercase ) ): if v[i] < tail[0]: _A = v[i] elif v[i] > tail[length - 1]: _A = v[i] length += 1 else: _A = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
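The function above is the classic O(n log n) longest-increasing-subsequence algorithm: it maintains the smallest possible tail of an increasing subsequence of each length and binary-searches for the replacement point. Its hand-rolled bisection (with `v[m] >= key` moving the right bound) is exactly bisect_left, as this restatement shows.

from bisect import bisect_left

def lis_length(seq):
    tails = []  # tails[k] = smallest possible tail of an increasing run of length k + 1
    for x in seq:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6  # e.g. 2, 3, 7, 8, 10, 13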
import math def __A ( _lowercase ): '''simple docstring''' _A = [] _A = 2 _A = int(math.sqrt(_lowercase ) ) # Size of every segment _A = [True] * (end + 1) _A = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): _A = False start += 1 prime += in_prime _A = end + 1 _A = min(2 * end , _lowercase ) while low <= n: _A = [True] * (high - low + 1) for each in in_prime: _A = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): _A = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) _A = high + 1 _A = min(high + end , _lowercase ) return prime print(sieve(10**6))
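The segmented sieve above first sieves primes up to sqrt(n), then marks composites block by block so only O(sqrt(n)) memory is live at a time. For contrast, a plain Sieve of Eratosthenes that keeps the whole range in memory:

def simple_sieve(n):
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):  # start at p*p: smaller multiples already marked
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]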
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = "ssube/stable-diffusion-x4-upscaler-onnx" def __A ( self: Union[str, Any] , __A: str=0 ) -> int: _A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__A ) ) _A = torch.manual_seed(__A ) _A = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __A ( self: int ) -> Optional[Any]: _A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs() _A = pipe(**__A ).images _A = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def __A ( self: List[Any] ) -> Optional[Any]: _A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs() _A = pipe(**__A ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __A ( self: Tuple ) -> Any: _A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs() _A = pipe(**__A ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __A ( self: List[Any] ) -> Optional[Any]: _A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs() _A = pipe(**__A ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __A ( self: List[Any] ) -> Dict: _A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' 
) _A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs() _A = pipe(**__A ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @property def __A ( self: List[Any] ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __A ( self: str ) -> str: _A = ort.SessionOptions() _A = False return options def __A ( self: List[Any] ) -> Optional[int]: _A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _A = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default _A = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) _A = '''A fantasy landscape, trending on artstation''' _A = torch.manual_seed(0 ) _A = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type='''np''' , ) _A = output.images _A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) _A = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def __A ( self: Tuple ) -> Tuple: _A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _A = init_image.resize((1_28, 1_28) ) _A = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' ) _A = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) _A = '''A fantasy landscape, trending on artstation''' _A = torch.manual_seed(0 ) _A = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type='''np''' , ) _A = output.images _A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
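The Flax upsampling block above doubles the spatial resolution with jax.image.resize in nearest-neighbour mode before convolving. A tiny standalone check of that resize call (NHWC layout; assumes jax is installed):

import jax
import jax.numpy as jnp

x = jnp.arange(4.0).reshape(1, 2, 2, 1)  # batch, height, width, channels
y = jax.image.resize(x, shape=(1, 4, 4, 1), method="nearest")
assert y.shape == (1, 4, 4, 1)
assert float(y[0, 0, 0, 0]) == 0.0 and float(y[0, 3, 3, 0]) == 3.0  # each pixel repeated 2x2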
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL __A = logging.get_logger(__name__) def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ): _A = round(val / multiple ) * multiple if max_val is not None and x > max_val: _A = math.floor(val / multiple ) * multiple if x < min_val: _A = math.ceil(val / multiple ) * multiple return x _A = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size _A ,_A = get_image_size(_lowercase ) _A ,_A = output_size # determine new height and width _A = output_height / input_height _A = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _A = scale_width else: # fit height _A = scale_height _A = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase ) _A = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = ["pixel_values"] def __init__( self: int , __A: bool = True , __A: Dict[str, int] = None , __A: PILImageResampling = PILImageResampling.BILINEAR , __A: bool = False , __A: int = 1 , __A: bool = True , __A: Union[int, float] = 1 / 2_55 , __A: bool = True , __A: Optional[Union[float, List[float]]] = None , __A: Optional[Union[float, List[float]]] = None , **__A: Optional[int] , ) -> None: super().__init__(**__A ) _A = size if size is not None else {'''height''': 3_84, '''width''': 3_84} _A = get_size_dict(__A ) _A = do_resize _A = size _A = keep_aspect_ratio _A = ensure_multiple_of _A = resample _A = do_rescale _A = rescale_factor _A = do_normalize _A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _A = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self: Dict , __A: np.ndarray , __A: Dict[str, int] , __A: bool = False , __A: int = 1 , __A: PILImageResampling = PILImageResampling.BICUBIC , __A: Optional[Union[str, ChannelDimension]] = None , **__A: Union[str, Any] , ) -> np.ndarray: _A = get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _A = get_resize_output_image_size( __A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=__A , multiple=__A , ) return resize(__A , size=__A , resample=__A , data_format=__A , **__A ) def __A ( self: Optional[int] , __A: np.ndarray , __A: Union[int, float] , __A: Optional[Union[str, ChannelDimension]] = None , **__A: List[str] , ) -> Optional[int]: return rescale(__A , scale=__A , data_format=__A , **__A ) def __A ( self: Optional[int] , __A: np.ndarray , __A: Union[float, List[float]] , __A: Union[float, List[float]] , __A: Optional[Union[str, ChannelDimension]] = None , **__A: Optional[Any] , ) -> np.ndarray: return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def __A ( self: Any , __A: ImageInput , __A: bool = None , __A: int = None , __A: bool = None , __A: int = None , __A: PILImageResampling = None , __A: bool = None , __A: float = None , __A: bool = None , __A: Optional[Union[float, List[float]]] = None , __A: Optional[Union[float, List[float]]] = None , __A: Optional[Union[str, TensorType]] = None , __A: ChannelDimension = ChannelDimension.FIRST , **__A: Tuple , ) -> PIL.Image.Image: _A = do_resize if do_resize is not None else self.do_resize _A = size if size is not None else self.size _A = get_size_dict(__A ) _A = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _A = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _A = resample if resample is not None else self.resample _A = do_rescale if do_rescale is not None else self.do_rescale _A = rescale_factor if rescale_factor is not None else self.rescale_factor _A = do_normalize if do_normalize is not None else self.do_normalize _A = image_mean if image_mean is not None else self.image_mean _A = image_std if image_std is not None else self.image_std _A = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_A = [to_numpy_array(__A ) for image in images] if do_resize: _A = [self.resize(image=__A , size=__A , resample=__A ) for image in images] if do_rescale: _A = [self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: _A = [self.normalize(image=__A , mean=__A , std=__A ) for image in images] _A = [to_channel_dimension_format(__A , __A ) for image in images] _A = {'''pixel_values''': images} return BatchFeature(data=__A , tensor_type=__A ) def __A ( self: Union[str, Any] , __A: int , __A: List[Tuple] = None ) -> str: _A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__A ) != len(__A ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(__A ): _A = target_sizes.numpy() _A = [] for idx in range(len(__A ) ): _A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__A ) _A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__A ) else: _A = logits.argmax(dim=1 ) _A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
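# A minimal, standalone sketch of the output-size computation used by resize()
# above (keep_aspect_ratio + ensure_multiple_of); the function and variable
# names here are illustrative, not part of the class API, and the min/max
# clamping of the original helper is omitted.
import math


def sketch_output_size(input_hw, target_hw, keep_aspect_ratio=True, multiple=32):
    def to_multiple(val):
        return round(val / multiple) * multiple

    in_h, in_w = input_hw
    out_h, out_w = target_hw
    scale_h, scale_w = out_h / in_h, out_w / in_w
    if keep_aspect_ratio:
        # scale as little as possible: keep the factor closer to 1 for both axes
        scale_h = scale_w = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return to_multiple(scale_h * in_h), to_multiple(scale_w * in_w)


# e.g. a 480x640 image targeted at 384x384 keeps its aspect ratio and snaps
# both sides to a multiple of 32:
assert sketch_output_size((480, 640), (384, 384)) == (384, 512)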
def topological_sort(graph):
    """Topological ordering of a directed acyclic graph via Kahn's algorithm (BFS)."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
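# Illustrative extra call (not in the original file): with a cycle no vertex
# ever reaches indegree 0, so cnt stays below len(graph) and the cycle is reported.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"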
def remove_digit(num: int) -> int:
    """
    Return the largest value obtainable by deleting exactly one digit
    from the given integer.

    >>> remove_digit(152)
    52
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for char in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
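# Illustrative cross-check (not in the original file): brute force over all
# single-digit deletions agrees with remove_digit above.
def remove_digit_bruteforce(num: int) -> int:
    s = str(abs(num))
    return max(int(s[:i] + s[i + 1 :]) for i in range(len(s)))


assert remove_digit(152) == remove_digit_bruteforce(152) == 52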
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = 1 @register_to_config def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]: # set `betas`, `alphas`, `timesteps` self.set_timesteps(__A ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int: _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(__A ) _A = [] def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__A ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(__A , __A , __A , __A ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor: return sample def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]: _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(__A , 1e-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[str] ) -> Dict: return self.config.num_train_timesteps
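# For reference: the len(self.ets) branches in step() above are Adams-Bashforth
# linear-multistep rules of increasing order; once four model outputs are
# cached, the 4th-order combination below is used at every step.  Standalone,
# illustrative sketch (the function name is ours):
def adams_bashforth_4(ets):
    # ets[-1] is the most recently cached model output
    return (1 / 24) * (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4])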
def kth_permutation(k, n):
    """
    Find the k-th lexicographic permutation of 0, 1, ..., n - 1
    using the factorial number system.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    """
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
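# Illustrative checks (not in the original file): k = 0 gives the identity
# ordering and k = n! - 1 the fully reversed one.
assert kth_permutation(0, 4) == [0, 1, 2, 3]
assert kth_permutation(23, 4) == [3, 2, 1, 0]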
def depth_first_search(grid, row, col, visit):
    """Count the simple paths from (row, col) to the bottom-right corner of the
    grid, moving in four directions and treating cells marked 1 as blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
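# Illustrative usage (not in the original file): an open 2x2 grid has exactly
# two simple paths from the top-left to the bottom-right corner.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2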
import math


def sieve(n):
    """Segmented sieve of Eratosthenes: primes up to n, sieving one
    sqrt(n)-sized segment at a time."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Classic sieve on the first segment [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve each remaining segment with the base primes
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
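# Quick illustrative check (not in the original file): the segmented result for
# a small bound matches a naive trial-division scan.
def naive_primes(n):
    return [p for p in range(2, n + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]


assert sieve(30) == naive_primes(30)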
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
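# Illustrative usage of the parser above, written against the upstream
# `transformers` package where this class ships as HfArgumentParser (method
# names in the obfuscated copy above differ):
from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class ExampleArguments:
    do_train: bool = False
    learning_rate: float = 5e-5


example_parser = HfArgumentParser(ExampleArguments)
(example_args,) = example_parser.parse_args_into_dataclasses(
    args=["--do_train", "--learning_rate", "3e-5"]
)
assert example_args.do_train is True and example_args.learning_rate == 3e-5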
def has_unique_chars(input_str: str) -> bool:
    """
    Check that no character occurs twice, using one bit of an
    arbitrary-precision integer per code point.

    >>> has_unique_chars("abcdef")
    True
    >>> has_unique_chars("abcdea")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
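# Illustrative (not in the original file): a Python int is an unbounded bitset,
# so the check works for any Unicode code point, not just ASCII.
assert has_unique_chars("abcd") is True
assert has_unique_chars("abca") is False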
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> 
Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, 
-0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) 
).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
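# Condensed, illustrative version of the inference path the integration tests
# above exercise (downloads the public checkpoint; shapes follow the asserts
# in the tests):
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
seg_model = MaskFormerForInstanceSegmentation.from_pretrained(
    "facebook/maskformer-swin-small-coco"
).eval()
img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
batch = processor(img, return_tensors="pt")
with torch.no_grad():
    preds = seg_model(**batch)
# masks_queries_logits: (1, num_queries, H / 4, W / 4);
# class_queries_logits: (1, num_queries, num_labels + 1) -- the +1 is the null class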
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __A ( _lowercase , _lowercase , _lowercase , _lowercase="attention" ): '''simple docstring''' _A = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] _A = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] _A = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] _A = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def __A ( _lowercase , _lowercase , _lowercase , _lowercase=False ): '''simple docstring''' if split_mlp_wi: _A = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] _A = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] _A = (wi_a, wi_a) else: _A = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] _A = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def __A ( _lowercase , *, _lowercase , _lowercase ): '''simple docstring''' _A = traverse_util.flatten_dict(variables['''target'''] ) _A = {'''/'''.join(_lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _A = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , _lowercase ) _A = collections.OrderedDict() # Shared embeddings. _A = old['''token_embedder/embedding'''] # Encoder. for i in range(_lowercase ): # Block i, layer 0 (Self Attention). _A = tax_layer_norm_lookup(_lowercase , _lowercase , '''encoder''' , '''pre_attention_layer_norm''' ) _A ,_A ,_A ,_A = tax_attention_lookup(_lowercase , _lowercase , '''encoder''' , '''attention''' ) _A = layer_norm _A = k.T _A = o.T _A = q.T _A = v.T # Block i, layer 1 (MLP). _A = tax_layer_norm_lookup(_lowercase , _lowercase , '''encoder''' , '''pre_mlp_layer_norm''' ) _A ,_A = tax_mlp_lookup(_lowercase , _lowercase , '''encoder''' , _lowercase ) _A = layer_norm if split_mlp_wi: _A = wi[0].T _A = wi[1].T else: _A = wi.T _A = wo.T _A = old[ '''encoder/relpos_bias/rel_embedding''' ].T _A = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(_lowercase ): # Block i, layer 0 (Self Attention). _A = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_self_attention_layer_norm''' ) _A ,_A ,_A ,_A = tax_attention_lookup(_lowercase , _lowercase , '''decoder''' , '''self_attention''' ) _A = layer_norm _A = k.T _A = o.T _A = q.T _A = v.T # Block i, layer 1 (Cross Attention). _A = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_cross_attention_layer_norm''' ) _A ,_A ,_A ,_A = tax_attention_lookup(_lowercase , _lowercase , '''decoder''' , '''encoder_decoder_attention''' ) _A = layer_norm _A = k.T _A = o.T _A = q.T _A = v.T # Block i, layer 2 (MLP). 
_A = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_mlp_layer_norm''' ) _A ,_A = tax_mlp_lookup(_lowercase , _lowercase , '''decoder''' , _lowercase ) _A = layer_norm if split_mlp_wi: _A = wi[0].T _A = wi[1].T else: _A = wi.T _A = wo.T _A = old['''decoder/decoder_norm/scale'''] _A = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _A = old['''decoder/logits_dense/kernel'''].T return new def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _A = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _A = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) _A = state_dict['''shared.weight'''] return state_dict def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = checkpoints.load_tax_checkpoint(_lowercase ) _A = convert_tax_to_pytorch(_lowercase , num_layers=config.num_layers , is_encoder_only=_lowercase ) _A = make_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase , strict=_lowercase ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase = False ): '''simple docstring''' _A = TaConfig.from_json_file(_lowercase ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: _A = TaEncoderModel(_lowercase ) else: _A = TaForConditionalGeneration(_lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(_lowercase , _lowercase , _lowercase , _lowercase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(_lowercase ) print('''Done''' ) if __name__ == "__main__": __A = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __A = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
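# Why every kernel above is transposed: Flax Dense kernels are stored as
# (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features).  Minimal illustration (variable names are ours):
import numpy as np
import torch

flax_kernel = np.random.randn(4, 3).astype(np.float32)  # (in, out)
linear = torch.nn.Linear(4, 3, bias=False)  # weight: (out, in)
linear.weight.data = torch.from_numpy(flax_kernel.T.copy())
x = torch.randn(2, 4)
assert torch.allclose(linear(x), x @ torch.from_numpy(flax_kernel))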
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str: _A = question_encoder _A = generator _A = self.question_encoder def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict: if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , '''question_encoder_tokenizer''' ) _A = os.path.join(__A , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__A ) self.generator.save_pretrained(__A ) @classmethod def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any: # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _A = kwargs.pop('''config''' , __A ) if config is None: _A = RagConfig.from_pretrained(__A ) _A = AutoTokenizer.from_pretrained( __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) _A = AutoTokenizer.from_pretrained( __A , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__A , generator=__A ) def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int: return self.current_tokenizer(*__A , **__A ) def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict: return self.generator.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple: return self.generator.decode(*__A , **__A ) def __A ( self: Dict ) -> List[str]: _A = self.question_encoder def __A ( self: Union[str, Any] ) -> int: _A = self.generator def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding: warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __A , ) if max_length is None: _A = self.current_tokenizer.model_max_length _A = self( __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _A = self.current_tokenizer.model_max_length _A = self( text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , ) _A = labels['''input_ids'''] return model_inputs
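# Illustrative usage (upstream name RagTokenizer; the question-encoder and
# generator tokenizers are loaded from the subfolders saved above).  The call
# below needs network access to fetch the public checkpoint:
from transformers import RagTokenizer

rag_tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
rag_batch = rag_tok(["who wrote hamlet?"], return_tensors="pt")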
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = LEDConfig A_ = {} A_ = "gelu" def __init__( self: Union[str, Any] , __A: Dict , __A: Optional[Any]=13 , __A: Optional[int]=7 , __A: str=True , __A: Union[str, Any]=False , __A: List[Any]=99 , __A: Tuple=32 , __A: str=2 , __A: Optional[Any]=4 , __A: Optional[int]=37 , __A: str=0.1 , __A: Tuple=0.1 , __A: List[Any]=20 , __A: Optional[Any]=2 , __A: Union[str, Any]=1 , __A: Optional[int]=0 , __A: Optional[Any]=4 , ) -> List[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id _A = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _A = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _A = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __A ( self: Dict ) -> Any: _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _A = prepare_led_inputs_dict(__A , __A , __A ) _A = tf.concat( [tf.zeros_like(__A )[:, :-1], tf.ones_like(__A )[:, -1:]] , axis=-1 , ) _A = global_attention_mask return config, inputs_dict def __A ( self: int , __A: Dict , __A: Optional[int] ) -> List[Any]: _A = TFLEDModel(config=__A ).get_decoder() _A = inputs_dict['''input_ids'''] _A = input_ids[:1, :] _A = inputs_dict['''attention_mask'''][:1, :] _A = 1 # first forward pass _A = model(__A , attention_mask=__A , use_cache=__A ) _A ,_A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = model(__A , attention_mask=__A )[0] _A = model(__A , attention_mask=__A , past_key_values=__A )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__A , __A , rtol=1e-3 ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ): '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () A_ = 
(TFLEDForConditionalGeneration,) if is_tf_available() else () A_ = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) A_ = True A_ = False A_ = False A_ = False def __A ( self: Any ) -> List[Any]: _A = TFLEDModelTester(self ) _A = ConfigTester(self , config_class=__A ) def __A ( self: int ) -> Union[str, Any]: self.config_tester.run_common_tests() def __A ( self: List[str] ) -> int: _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__A ) def __A ( self: Optional[int] ) -> Optional[int]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = tf.zeros_like(inputs_dict['''attention_mask'''] ) _A = 2 _A = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _A = True _A = self.model_tester.seq_length _A = self.model_tester.encoder_seq_length def check_decoder_attentions_output(__A: str ): _A = outputs.decoder_attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(__A: Tuple ): _A = [t.numpy() for t in outputs.encoder_attentions] _A = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _A = True _A = False _A = False _A = model_class(__A ) _A = model(self._prepare_for_class(__A , __A ) ) _A = len(__A ) self.assertEqual(config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) if self.is_encoder_decoder: _A = model_class(__A ) _A = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(config.output_hidden_states , __A ) check_decoder_attentions_output(__A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _A = True _A = model_class(__A ) _A = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) # Check attention is always last and order is fine _A = True _A = True _A = model_class(__A ) _A = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) ) self.assertEqual(model.config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def __A ( self: Optional[int] ) -> List[str]: pass def __A ( self: Dict ) -> Optional[Any]: # TODO: Head-masking not yet implement pass def __A ( _lowercase ): '''simple docstring''' return tf.constant(_lowercase , dtype=tf.intaa ) __A = 1e-4 @slow @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Optional[int] ) -> Any: _A = 
TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _A = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _A = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _A = prepare_led_inputs_dict(model.config , __A , __A ) _A = model(**__A )[0] _A = (1, 10_24, 7_68) self.assertEqual(output.shape , __A ) # change to expected output here _A = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1e-3 ) def __A ( self: Any ) -> Union[str, Any]: _A = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _A = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _A = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _A = prepare_led_inputs_dict(model.config , __A , __A ) _A = model(**__A )[0] _A = (1, 10_24, model.config.vocab_size) self.assertEqual(output.shape , __A ) # change to expected output here _A = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1e-3 , rtol=1e-3 )
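# Illustrative: LED needs a global_attention_mask marking the tokens that
# attend globally; the tests above put it on the last position.  Minimal sketch:
import tensorflow as tf

ids = tf.constant([[0, 31414, 232, 2]])
global_attention_mask = tf.concat(
    [tf.zeros_like(ids)[:, :-1], tf.ones_like(ids)[:, -1:]], axis=-1
)  # -> [[0, 0, 0, 1]]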
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    Length of the longest strictly increasing subsequence in O(n log n).

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element to keep the tails minimal
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
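# Illustrative check (not in the original file): the classic 16-element example
# has a longest increasing subsequence of length 6, e.g. 0 2 6 9 11 15.
assert (
    longest_increasing_subsequence_length(
        [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
    )
    == 6
)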
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Any , __A: int , __A: int , __A: float = 0 ) -> None: _A ,_A = row, column _A = [[default_value for c in range(__A )] for r in range(__A )] def __str__( self: int ) -> str: _A = f"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier _A = 0 for row_vector in self.array: for obj in row_vector: _A = max(__A , len(str(__A ) ) ) _A = f"""%{max_element_length}s""" # Make string and return def single_line(__A: list[float] ) -> str: nonlocal string_format_identifier _A = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__A ) for row_vector in self.array ) return s def __repr__( self: List[str] ) -> str: return str(self ) def __A ( self: str , __A: tuple[int, int] ) -> bool: if not (isinstance(__A , (list, tuple) ) and len(__A ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self: int , __A: tuple[int, int] ) -> Any: assert self.validate_indicies(__A ) return self.array[loc[0]][loc[1]] def __setitem__( self: Any , __A: tuple[int, int] , __A: float ) -> None: assert self.validate_indicies(__A ) _A = value def __add__( self: int , __A: Matrix ) -> Matrix: assert isinstance(__A , __A ) assert self.row == another.row and self.column == another.column # Add _A = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _A = self[r, c] + another[r, c] return result def __neg__( self: Dict ) -> Matrix: _A = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _A = -self[r, c] return result def __sub__( self: List[Any] , __A: Matrix ) -> Matrix: return self + (-another) def __mul__( self: List[str] , __A: int | float | Matrix ) -> Matrix: if isinstance(__A , (int, float) ): # Scalar multiplication _A = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _A = self[r, c] * another return result elif isinstance(__A , __A ): # Matrix multiplication assert self.column == another.row _A = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: _A = f"""Unsupported type given for another ({type(__A )})""" raise TypeError(__A ) def __A ( self: int ) -> Matrix: _A = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): _A = self[r, c] return result def __A ( self: List[str] , __A: Matrix , __A: Matrix ) -> Any: assert isinstance(__A , __A ) and isinstance(__A , __A ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate _A = v.transpose() _A = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def __A ( ): '''simple docstring''' _A = Matrix(3 , 3 , 0 ) for i in range(3 ): _A = 1 print(f"""a^(-1) is {ainv}""" ) # u, v _A = Matrix(3 , 1 , 0 ) _A ,_A ,_A = 1, 2, -3 _A = Matrix(3 , 1 , 0 ) _A ,_A ,_A = 4, -2, 5 print(f"""u is {u}""" ) print(f"""v is {v}""" ) print(f"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(f"""(a + uv^T)^(-1) is 
{ainv.sherman_morrison(_lowercase , _lowercase )}""" ) def __A ( ): '''simple docstring''' import doctest doctest.testmod() testa()
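# Independent numeric check of the Sherman-Morrison identity implemented above,
# with the same u and v as the test (illustrative, via numpy):
import numpy as np

ainv_np = np.eye(3)
u_np = np.array([[1.0], [2.0], [-3.0]])
v_np = np.array([[4.0], [-2.0], [5.0]])
lhs = np.linalg.inv(np.linalg.inv(ainv_np) + u_np @ v_np.T)
rhs = ainv_np - (ainv_np @ u_np @ v_np.T @ ainv_np) / (1.0 + v_np.T @ ainv_np @ u_np)
assert np.allclose(lhs, rhs)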
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": 
out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
1
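The GLUE module above caches features with torch.save and reloads them with torch.load; a minimal, self-contained sketch of that cache-or-build pattern (the file name and build_fn below are illustrative stand-ins, not the module's real helpers):

import os
import torch

def load_or_build_features(cache_path, build_fn, overwrite=False):
    # Reuse a cached feature file when present, otherwise build and save it.
    if os.path.exists(cache_path) and not overwrite:
        return torch.load(cache_path)
    features = build_fn()
    torch.save(features, cache_path)
    return features

features = load_or_build_features(
    "cached_train.pt",
    build_fn=lambda: [torch.arange(4) for _ in range(8)],  # toy stand-in for real features
)
print(len(features))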
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A = 16 __A = 32 def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = 16 ): '''simple docstring''' _A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _A = DatasetDict( { '''train''': dataset['''train'''].select(_lowercase ), '''validation''': dataset['''train'''].select(_lowercase ), '''test''': dataset['''validation'''], } ) def tokenize_function(_lowercase ): # max_length=None => use the model max length (it's actually the default) _A = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _A = datasets.map( _lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _A = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _A = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _A = 16 elif accelerator.mixed_precision != "no": _A = 8 else: _A = None return tokenizer.pad( _lowercase , padding='''longest''' , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_A = DataLoader( tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) _A = DataLoader( tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) _A = DataLoader( tokenized_datasets['''test'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader, test_dataloader def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = [] # Download the dataset _A = load_dataset('''glue''' , '''mrpc''' ) # Create our splits _A = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _A = config['''lr'''] _A = int(config['''num_epochs'''] ) _A = int(config['''seed'''] ) _A = int(config['''batch_size'''] ) _A = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _A = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _A = batch_size // MAX_GPU_BATCH_SIZE _A = MAX_GPU_BATCH_SIZE set_seed(_lowercase ) # New Code # # Create our folds: _A = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) _A = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(_lowercase ): _A ,_A ,_A = get_fold_dataloaders( _lowercase , _lowercase , _lowercase , _lowercase , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _A = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _A = model.to(accelerator.device ) # Instantiate optimizer _A = AdamW(params=model.parameters() , lr=_lowercase ) # Instantiate scheduler _A = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=1_00 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _A ,_A ,_A ,_A ,_A = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # Now we train the model for epoch in range(_lowercase ): model.train() for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _A = model(**_lowercase ) _A = outputs.loss _A = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _A = model(**_lowercase ) _A = outputs.logits.argmax(dim=-1 ) _A ,_A = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_lowercase , references=_lowercase , ) _A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , _lowercase ) # New Code # # We also run predictions on the test set at the very end _A = [] for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _A = model(**_lowercase ) _A = outputs.logits _A ,_A = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(_lowercase , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _A = torch.cat(_lowercase , dim=0 ) _A = torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _A = metric.compute(predictions=_lowercase , references=_lowercase ) accelerator.print('''Average test metrics from all folds:''' , _lowercase ) def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_lowercase , default=_lowercase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=_lowercase , default=3 , help='''The number of splits to perform across the dataset''' ) _A = parser.parse_args() _A = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
62
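For reference, a self-contained sketch of how sklearn's StratifiedKFold yields the (train_idxs, valid_idxs) pairs the loop above iterates; the toy label array is illustrative:

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
kfold = StratifiedKFold(n_splits=2)
# split() only needs the label array for stratification; X can be a placeholder.
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    print(fold, train_idxs, valid_idxs)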
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup  # the dump read "bsa"; the real package is bs4


def get_imdb_top_aaa_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_aaa_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
62
1
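The scraper above can be exercised offline; a small sketch of the same find_all and tag-attribute access pattern on static HTML (the markup snippet is a made-up stand-in for IMDb's page):

from bs4 import BeautifulSoup

html = """
<td class="titleColumn"><a>Movie A</a></td>
<td class="ratingColumn imdbRating"><strong>9.2</strong></td>
"""
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("td", class_="titleColumn")
ratings = soup.find_all("td", class_="ratingColumn imdbRating")
print({t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)})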
from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "xlm-prophetnet" A_ = ["past_key_values"] A_ = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self: Optional[int] , __A: Optional[float] = 0.1 , __A: Optional[Union[str, Callable]] = "gelu" , __A: Optional[int] = 3_05_22 , __A: Optional[int] = 10_24 , __A: Optional[int] = 40_96 , __A: Optional[int] = 12 , __A: Optional[int] = 16 , __A: Optional[int] = 40_96 , __A: Optional[int] = 12 , __A: Optional[int] = 16 , __A: Optional[float] = 0.1 , __A: Optional[float] = 0.1 , __A: Optional[int] = 5_12 , __A: Optional[float] = 0.02 , __A: Optional[bool] = True , __A: Optional[bool] = True , __A: Optional[int] = 0 , __A: Optional[int] = 2 , __A: Optional[int] = 32 , __A: Optional[int] = 1_28 , __A: Optional[bool] = False , __A: Optional[float] = 0.0 , __A: Optional[bool] = True , __A: Optional[int] = 0 , __A: Optional[int] = 1 , __A: Optional[int] = 2 , **__A: Tuple , ) -> List[Any]: _A = vocab_size _A = hidden_size _A = encoder_ffn_dim _A = num_encoder_layers _A = num_encoder_attention_heads _A = decoder_ffn_dim _A = num_decoder_layers _A = num_decoder_attention_heads _A = max_position_embeddings _A = init_std # Normal(0, this parameter) _A = activation_function # parameters for xlmprophetnet _A = ngram _A = num_buckets _A = relative_max_distance _A = disable_ngram_loss _A = eps # 3 Types of Dropout _A = attention_dropout _A = activation_dropout _A = dropout _A = use_cache super().__init__( pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_start_token_id=__A , **__A , ) @property def __A ( self: Union[str, Any] ) -> int: return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def __A ( self: Tuple , __A: Dict ) -> int: raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
62
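The config above exposes num_hidden_layers as the sum of encoder and decoder layers and rejects direct assignment; a minimal standalone sketch of that derived-property pattern (the class name is illustrative):

class ToyEncoderDecoderConfig:
    def __init__(self, num_encoder_layers=12, num_decoder_layers=12):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

    @property
    def num_hidden_layers(self):
        # Read-only view derived from the two real fields.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("set num_encoder_layers / num_decoder_layers instead")

print(ToyEncoderDecoderConfig().num_hidden_layers)  # 24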
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
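The test fixture above writes a vocab JSON and a merges file into a temporary directory; a sketch of just that setup step, with throwaway token and merge lists:

import json
import os
import tempfile

vocab = {tok: i for i, tok in enumerate(["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"])}
merges = ["#version: 0.2", "a p", "t e</w>"]

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab) + "\n")
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))
print(sorted(os.listdir(tmpdir)))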
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = ["image_processor", "tokenizer"] A_ = "FlavaImageProcessor" A_ = ("BertTokenizer", "BertTokenizerFast") def __init__( self: str , __A: Dict=None , __A: List[str]=None , **__A: Tuple ) -> str: _A = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __A , ) _A = kwargs.pop('''feature_extractor''' ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__A , __A ) _A = self.image_processor def __call__( self: Any , __A: Optional[ImageInput] = None , __A: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __A: bool = True , __A: Union[bool, str, PaddingStrategy] = False , __A: Union[bool, str, TruncationStrategy] = False , __A: Optional[int] = None , __A: int = 0 , __A: Optional[int] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = True , __A: Optional[Union[str, TensorType]] = None , **__A: List[Any] , ) -> Any: if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: _A = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) if images is not None: _A = self.image_processor( __A , return_image_mask=__A , return_codebook_pixels=__A , return_tensors=__A , **__A , ) if text is not None and images is not None: encoding.update(__A ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def __A ( self: int , *__A: Dict , **__A: Union[str, Any] ) -> str: return self.tokenizer.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Optional[Any] , **__A: Tuple ) -> Union[str, Any]: return self.tokenizer.decode(*__A , **__A ) @property def __A ( self: Union[str, Any] ) -> str: _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __A ( self: Optional[int] ) -> Optional[int]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __A , ) return self.image_processor_class @property def __A ( self: Optional[int] ) -> Union[str, Any]: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __A , ) return self.image_processor
62
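A rough sketch of the text/images dispatch the processor's __call__ implements, with toy stand-ins for tokenization and image preprocessing:

class ToyProcessor:
    # Minimal dispatch mirroring the branching above: at least one modality
    # must be given; each present modality contributes its own outputs.
    def __call__(self, images=None, text=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        out = {}
        if text is not None:
            out["input_ids"] = [len(t) for t in text]            # stand-in for tokenization
        if images is not None:
            out["pixel_values"] = [sum(img) for img in images]   # stand-in for preprocessing
        return out

print(ToyProcessor()(text=["hi", "there"], images=[[1, 2], [3, 4]]))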
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
1
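The OnnxConfig at the end picks dynamic axes per task; the selection logic in isolation looks roughly like this:

from collections import OrderedDict

def onnx_inputs(task: str):
    # Multiple-choice inputs carry an extra 'choice' axis between batch and sequence.
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])

print(onnx_inputs("sequence-classification"))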
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "align_text_model" def __init__( self: Optional[Any] , __A: Optional[int]=3_05_22 , __A: str=7_68 , __A: Any=12 , __A: List[Any]=12 , __A: List[str]=30_72 , __A: Union[str, Any]="gelu" , __A: Optional[int]=0.1 , __A: Dict=0.1 , __A: Optional[Any]=5_12 , __A: Optional[Any]=2 , __A: Optional[int]=0.02 , __A: Optional[int]=1e-12 , __A: Optional[Any]=0 , __A: Any="absolute" , __A: Union[str, Any]=True , **__A: Optional[Any] , ) -> Union[str, Any]: super().__init__(**__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = pad_token_id @classmethod def __A ( cls: Tuple , __A: Union[str, os.PathLike] , **__A: int ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) _A ,_A = cls.get_config_dict(__A , **__A ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": _A = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "align_vision_model" def __init__( self: Optional[int] , __A: int = 3 , __A: int = 6_00 , __A: float = 2.0 , __A: float = 3.1 , __A: int = 8 , __A: List[int] = [3, 3, 5, 3, 5, 5, 3] , __A: List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , __A: List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , __A: List[int] = [] , __A: List[int] = [1, 2, 2, 2, 1, 2, 1] , __A: List[int] = [1, 2, 2, 3, 3, 4, 1] , __A: List[int] = [1, 6, 6, 6, 6, 6, 6] , __A: float = 0.25 , __A: str = "swish" , __A: int = 25_60 , __A: str = "mean" , __A: float = 0.02 , __A: float = 0.001 , __A: float = 0.99 , __A: float = 0.2 , **__A: str , ) -> str: super().__init__(**__A ) _A = num_channels _A = image_size _A = width_coefficient _A = depth_coefficient _A = depth_divisor _A = kernel_sizes _A = in_channels _A = out_channels _A = depthwise_padding _A = strides _A = num_block_repeats _A = expand_ratios _A = squeeze_expansion_ratio _A = hidden_act _A = hidden_dim _A = pooling_type _A = initializer_range _A = batch_norm_eps _A = batch_norm_momentum _A = drop_connect_rate _A = sum(__A ) * 4 @classmethod def __A ( cls: List[Any] , __A: Union[str, os.PathLike] , **__A: Dict ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) _A ,_A = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": _A = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to 
instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "align" A_ = True def __init__( self: int , __A: Dict=None , __A: str=None , __A: Optional[Any]=6_40 , __A: str=1.0 , __A: Union[str, Any]=0.02 , **__A: Union[str, Any] , ) -> str: super().__init__(**__A ) if text_config is None: _A = {} logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' ) if vision_config is None: _A = {} logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' ) _A = AlignTextConfig(**__A ) _A = AlignVisionConfig(**__A ) _A = projection_dim _A = temperature_init_value _A = initializer_range @classmethod def __A ( cls: Dict , __A: AlignTextConfig , __A: AlignVisionConfig , **__A: Optional[Any] ) -> Union[str, Any]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def __A ( self: List[Any] ) -> str: _A = copy.deepcopy(self.__dict__ ) _A = self.text_config.to_dict() _A = self.vision_config.to_dict() _A = self.__class__.model_type return output
62
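The composite config serializes by recursing into its sub-configs and stamping the class-level model_type; a minimal standalone sketch (class names are illustrative):

import copy

class SubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "align"  # illustrative

    def __init__(self, text_config, vision_config):
        self.text_config = text_config
        self.vision_config = vision_config

    def to_dict(self):
        # Serialize recursively, then record the class-level model_type.
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

cfg = CompositeConfig(SubConfig(vocab_size=30522), SubConfig(image_size=600))
print(cfg.to_dict())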
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export finished''' )
62
1
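A minimal, runnable variant of the torch.onnx.export call that closes the trainer above; TinyQA is a toy stand-in for the quantized QA model, and tiny_qa.onnx is an arbitrary output path:

import torch

class TinyQA(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 2)

    def forward(self, x):
        logits = self.linear(x)
        return logits[..., 0], logits[..., 1]  # start / end logits

model = TinyQA().eval()
dummy = torch.randn(1, 4, 8)
axes = {0: "batch_size", 1: "seq_len"}  # mark both axes dynamic, as above
torch.onnx.export(
    model,
    (dummy,),
    "tiny_qa.onnx",
    input_names=["hidden_states"],
    output_names=["start_logits", "end_logits"],
    dynamic_axes={"hidden_states": axes, "start_logits": axes, "end_logits": axes},
    opset_version=13,
)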
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model'} __A = { 'vocab_file': { 'facebook/mbart-large-50-one-to-many-mmt': ( 'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model' ), } } __A = { 'facebook/mbart-large-50-one-to-many-mmt': 1024, } # fmt: off __A = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI'] class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = ["input_ids", "attention_mask"] A_ = [] A_ = [] def __init__( self: List[str] , __A: Dict , __A: Dict=None , __A: List[str]=None , __A: List[Any]="</s>" , __A: Any="</s>" , __A: Optional[Any]="<s>" , __A: List[str]="<unk>" , __A: Optional[Any]="<pad>" , __A: Tuple="<mask>" , __A: Optional[Dict[str, Any]] = None , **__A: Union[str, Any] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs _A = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__A , tgt_lang=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) _A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _A = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _A = 1 _A = len(self.sp_model ) _A = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A ) } _A = {v: k for k, v in self.lang_code_to_id.items()} _A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _A = src_lang if src_lang is not None else '''en_XX''' _A = self.lang_code_to_id[self._src_lang] _A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __A ( self: str ) -> int: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __A ( self: Any ) -> str: return self._src_lang @src_lang.setter def __A ( self: Dict , __A: str ) -> None: _A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self: str ) -> Dict: _A = self.__dict__.copy() _A = None return state def __setstate__( self: int , __A: Dict ) -> None: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __A ( self: List[Any] ) -> Dict: _A = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __A ( self: Tuple , __A: str ) -> List[str]: return self.sp_model.encode(__A , out_type=__A ) def __A ( self: List[str] , __A: str ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _A = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __A ( self: Tuple , __A: int ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __A ( self: List[Any] , __A: str ) -> Tuple: _A = [] _A = '''''' _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__A ) + token _A = True _A = [] else: current_sub_tokens.append(__A ) _A = False out_string += self.sp_model.decode(__A ) return out_string.strip() def __A ( self: Dict , __A: str , __A: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,) def __A ( self: Optional[int] , __A: List[int] , __A: Optional[List[int]] = None , __A: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) _A = [1] * len(self.prefix_tokens ) _A = [1] * 
len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__A )) + suffix_ones return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones def __A ( self: Tuple , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __A ( self: Tuple , __A: List[str] , __A: str , __A: Optional[str] , __A: Optional[str] , **__A: Any ) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _A = src_lang _A = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) _A = self.convert_tokens_to_ids(__A ) _A = tgt_lang_id return inputs def __A ( self: Optional[int] , __A: List[str] , __A: str = "en_XX" , __A: Optional[List[str]] = None , __A: str = "ro_RO" , **__A: Optional[Any] , ) -> BatchEncoding: _A = src_lang _A = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def __A ( self: List[str] ) -> int: return self.set_src_lang_special_tokens(self.src_lang ) def __A ( self: Tuple ) -> Optional[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __A ( self: Dict , __A: str ) -> None: _A = self.lang_code_to_id[src_lang] _A = [self.cur_lang_code_id] _A = [self.eos_token_id] def __A ( self: List[str] , __A: str ) -> None: _A = self.lang_code_to_id[tgt_lang] _A = [self.cur_lang_code_id] _A = [self.eos_token_id]
62
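The token-to-id logic above pins the first four fairseq ids and shifts every sentencepiece id by a fixed offset; a pure-Python sketch with a toy vocabulary:

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
spm_vocab = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}

def spm_piece_to_id(piece):
    # Stand-in for sp_model.PieceToId: 0 means "unknown" in sentencepiece.
    return spm_vocab.get(piece, 0)

def token_to_id(token):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    # Shift real pieces by the offset; map unknowns to the fairseq <unk> id.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

print(token_to_id(","), token_to_id("<pad>"), token_to_id("xyz"))  # 4 1 3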
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
1
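The module above delegates to transformers' internal _LazyModule; a rough standard-library analogue uses PEP 562's module-level __getattr__. Save this as a package's __init__.py, then "from mypkg import sqrt" triggers the lazy lookup; the math target module is just a stand-in:

# mypkg/__init__.py, a toy analogue of the lazy import structure above
import importlib

_import_structure = {"math": ["sqrt", "floor"]}

def __getattr__(name):
    # PEP 562: called when `name` is not found in the module namespace;
    # resolve it lazily from the mapped target module on first access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")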
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: List[Any] , __A: Any , __A: List[str]=13 , __A: Tuple=7 , __A: Any=True , __A: Union[str, Any]=True , __A: str=True , __A: List[str]=True , __A: Any=99 , __A: str=64 , __A: str=32 , __A: Tuple=5 , __A: Dict=4 , __A: Optional[int]=37 , __A: List[str]="gelu" , __A: Dict=0.1 , __A: Optional[int]=0.1 , __A: Optional[Any]=5_12 , __A: List[Any]=16 , __A: Any=2 , __A: List[str]=0.02 , __A: Union[str, Any]=3 , __A: Dict=4 , __A: Optional[Any]=None , ) -> str: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = embedding_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope def __A ( self: List[str] ) -> Optional[int]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self: List[str] ) -> Optional[Any]: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , ) def __A ( self: Optional[int] , __A: int , __A: Dict , __A: Tuple , __A: List[Any] , __A: int , __A: Optional[Any] , __A: List[Any] ) -> Union[str, Any]: _A = MegatronBertModel(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A ) _A = model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __A ( self: int , __A: Dict , __A: Optional[int] , __A: Dict , __A: Union[str, Any] , __A: int , __A: int , __A: List[str] ) -> Optional[int]: _A = MegatronBertForMaskedLM(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Any , __A: Optional[Any] , __A: Optional[int] , __A: int , __A: Union[str, Any] , __A: Optional[int] , __A: Optional[int] , __A: Optional[Any] ) -> int: _A = MegatronBertForCausalLM(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: str , __A: Tuple , __A: List[str] , __A: Tuple , __A: List[str] , __A: Union[str, Any] , __A: Tuple , __A: List[Any] ) -> str: _A = MegatronBertForNextSentencePrediction(config=__A ) model.to(__A ) model.eval() _A = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __A ( self: List[Any] , __A: Optional[int] , __A: Union[str, Any] , __A: Any , __A: Any , __A: int , __A: Optional[int] , __A: Any ) -> List[str]: _A = MegatronBertForPreTraining(config=__A ) model.to(__A ) model.eval() _A = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __A ( self: Optional[Any] , __A: List[str] , __A: List[str] , __A: Tuple , __A: Dict , __A: Optional[Any] , __A: Dict , __A: Tuple ) -> Union[str, Any]: _A = MegatronBertForQuestionAnswering(config=__A ) model.to(__A ) model.eval() _A = model( __A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: Optional[int] , __A: List[str] , __A: List[str] , __A: Dict , __A: Optional[Any] ) -> Union[str, Any]: _A = self.num_labels _A = MegatronBertForSequenceClassification(__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self: Union[str, Any] , __A: List[Any] , __A: Tuple , __A: Optional[int] , __A: List[str] , __A: Any , __A: str , __A: int ) -> Union[str, Any]: _A = self.num_labels _A = MegatronBertForTokenClassification(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self: str , __A: Optional[int] , __A: List[str] , __A: List[str] , __A: Dict , __A: int , __A: str , __A: str ) -> Dict: _A = self.num_choices _A = MegatronBertForMultipleChoice(config=__A ) model.to(__A ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , 
self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self: List[Any] ) -> Any: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A_ = True # test_resize_embeddings = False A_ = False def __A ( self: List[Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Any=False ) -> Dict: _A = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class in get_values(__A ): _A = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A ) _A = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def __A ( self: List[str] ) -> str: _A = MegatronBertModelTester(self ) _A = ConfigTester(self , config_class=__A , hidden_size=37 ) def __A ( self: int ) -> Any: self.config_tester.run_common_tests() def __A ( self: List[str] ) -> List[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__A ) def __A ( self: int ) -> Union[str, Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A ) def __A ( self: Tuple ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A ) def __A ( self: List[Any] ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A ) def __A ( self: Union[str, Any] ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A ) def __A ( self: Optional[Any] ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A ) def __A ( self: Union[str, Any] ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A ) def __A ( self: int ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A ) def __A ( _lowercase ): '''simple docstring''' return torch.tensor( _lowercase , dtype=torch.long , 
device=_lowercase , ) __A = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('''Model is not available.''' ) def __A ( self: List[str] ) -> Dict: _A = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: _A = os.path.join(os.environ['''MYDIR'''] , __A ) _A = MegatronBertModel.from_pretrained(__A ) model.to(__A ) model.half() _A = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): _A = model(__A )[0] _A = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __A ) _A = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3 ): for jj in range(3 ): _A = output[0, ii, jj] _A = expected[3 * ii + jj] _A = '''ii={} jj={} a={} b={}'''.format(__A , __A , __A , __A ) self.assertTrue(math.isclose(__A , __A , rel_tol=__A , abs_tol=__A ) , msg=__A )
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
62
1
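Assuming the reconstructed names above (chunker, prepare_input and generate_table are confirmed by call sites in the file; encode and decode are conventional guesses for the two anonymized definitions), a quick roundtrip check:

secret = encode("Hide the gold in the tree stump", "playfair example")
print(secret)
# Decoding returns the cleaned, X-padded plaintext, not the original string verbatim.
print(decode(secret, "playfair example"))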
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
62
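A one-line check of the restored gate against its truth table (names recovered from the call sites above):

assert [nor_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 0, 0, 0]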
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor and create random PIL images
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        tester = self.image_proc_tester
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (1, tester.num_channels, tester.size["height"], tester.size["width"]))

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (tester.batch_size, tester.num_channels, tester.size["height"], tester.size["width"]),
        )

    def test_call_numpy(self):
        # Initialize image_processor and create random numpy tensors
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        tester = self.image_proc_tester
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (1, tester.num_channels, tester.size["height"], tester.size["width"]))

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (tester.batch_size, tester.num_channels, tester.size["height"], tester.size["width"]),
        )

    def test_call_pytorch(self):
        # Initialize image_processor and create random PyTorch tensors
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        tester = self.image_proc_tester
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (1, tester.num_channels, tester.size["height"], tester.size["width"]))

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (tester.batch_size, tester.num_channels, tester.size["height"], tester.size["width"]),
        )
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search producing a post-order of the original graph."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph, collecting one component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: post-order pass on the graph, then DFS on its reverse."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
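# Example usage (our addition): on the sample graphs above, the components are
# {0, 1, 2}, {3}, {4} for test_graph_1 (0 -> 2 -> 1 -> 0 forms the only cycle)
# and {0, 1, 2}, {3, 4, 5} for test_graph_2 (3 -> 4 -> 5 -> 3 is a cycle).
if __name__ == "__main__":
    for graph in (test_graph_1, test_graph_2):
        # sort each component so the output does not depend on traversal order
        print([sorted(component) for component in strongly_connected_components(graph)])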
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BertJapaneseTokenizer A_ = False A_ = True def __A ( self: int ) -> int: super().setUp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __A ( self: Dict , __A: List[Any] ) -> List[str]: _A = '''こんにちは、世界。 \nこんばんは、世界。''' _A = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __A ( self: List[str] , __A: str ) -> Dict: _A ,_A = self.get_input_output_texts(__A ) _A = tokenizer.encode(__A , add_special_tokens=__A ) _A = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def __A ( self: Union[str, Any] ) -> List[str]: pass # TODO add if relevant def __A ( self: List[str] ) -> List[str]: pass # TODO add if relevant def __A ( self: str ) -> List[Any]: pass # TODO add if relevant def __A ( self: int ) -> Optional[Any]: _A = self.tokenizer_class(self.vocab_file ) _A = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(__A , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __A ( self: Tuple ) -> Union[str, Any]: _A = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(__A ) _A = '''こんにちは、世界。\nこんばんは、世界。''' _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _A = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(__A , '''wb''' ) as handle: pickle.dump(__A , __A ) with open(__A , '''rb''' ) as handle: _A = pickle.load(__A ) _A = tokenizer_new.tokenize(__A ) self.assertListEqual(__A , __A ) def __A ( self: Any ) -> List[str]: _A = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __A ( self: Optional[int] ) -> Union[str, Any]: try: _A = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __A ( self: int ) -> Any: try: _A = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 
''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __A ( self: Optional[int] ) -> int: _A = MecabTokenizer(do_lower_case=__A , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __A ( self: Optional[Any] ) -> Any: try: _A = MecabTokenizer( do_lower_case=__A , normalize_text=__A , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __A ( self: List[Any] ) -> List[Any]: _A = MecabTokenizer(normalize_text=__A , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __A ( self: int ) -> Any: _A = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(__A ) _A = '''こんにちは、世界。\nこんばんは、世界。''' _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _A = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(__A , '''wb''' ) as handle: pickle.dump(__A , __A ) with open(__A , '''rb''' ) as handle: _A = pickle.load(__A ) _A = tokenizer_new.tokenize(__A ) self.assertListEqual(__A , __A ) @require_sudachi def __A ( self: Union[str, Any] ) -> Dict: _A = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __A ( self: Optional[Any] ) -> Union[str, Any]: _A = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __A ( self: str ) -> Optional[int]: _A = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __A ( self: Tuple ) -> Any: _A = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __A ( self: Optional[int] ) -> int: _A = SudachiTokenizer(do_lower_case=__A , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __A ( self: Tuple ) -> List[Any]: _A = SudachiTokenizer(normalize_text=__A , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 
発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __A ( self: Optional[int] ) -> List[Any]: _A = SudachiTokenizer(trim_whitespace=__A , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __A ( self: str ) -> Optional[int]: _A = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(__A ) _A = '''こんにちは、世界。\nこんばんは、世界。''' _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _A = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(__A , '''wb''' ) as handle: pickle.dump(__A , __A ) with open(__A , '''rb''' ) as handle: _A = pickle.load(__A ) _A = tokenizer_new.tokenize(__A ) self.assertListEqual(__A , __A ) @require_jumanpp def __A ( self: List[str] ) -> Optional[Any]: _A = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __A ( self: Optional[Any] ) -> Dict: _A = JumanppTokenizer(do_lower_case=__A ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __A ( self: List[Any] ) -> Optional[Any]: _A = JumanppTokenizer(normalize_text=__A ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __A ( self: str ) -> Union[str, Any]: _A = JumanppTokenizer(trim_whitespace=__A ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __A ( self: Optional[int] ) -> Union[str, Any]: _A = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __A ( self: Optional[Any] ) -> Union[str, Any]: _A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] _A = {} for i, token in enumerate(__A ): _A = i _A = WordpieceTokenizer(vocab=__A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __A ( 
self: Union[str, Any] ) -> str: _A = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) _A = tokenizer.subword_tokenizer _A = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(__A , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) _A = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(__A , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __A ( self: List[str] ) -> Union[str, Any]: _A = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) _A = tokenizer.encode('''ありがとう。''' , add_special_tokens=__A ) _A = tokenizer.encode('''どういたしまして。''' , add_special_tokens=__A ) _A = tokenizer.build_inputs_with_special_tokens(__A ) _A = tokenizer.build_inputs_with_special_tokens(__A , __A ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BertJapaneseTokenizer A_ = False def __A ( self: Optional[Any] ) -> Union[str, Any]: super().setUp() _A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __A ( self: Optional[Any] , **__A: str ) -> Dict: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **__A ) def __A ( self: List[str] , __A: Union[str, Any] ) -> Optional[int]: _A = '''こんにちは、世界。 \nこんばんは、世界。''' _A = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __A ( self: int ) -> List[str]: pass # TODO add if relevant def __A ( self: Dict ) -> Optional[int]: pass # TODO add if relevant def __A ( self: List[Any] ) -> Optional[int]: pass # TODO add if relevant def __A ( self: Optional[int] ) -> List[str]: _A = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) _A = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( __A , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__A ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __A ( self: Tuple ) -> Any: _A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] _A = {} for i, token in enumerate(__A ): _A = i _A = CharacterTokenizer(vocab=__A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __A ( self: Optional[Any] ) -> Dict: _A = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) _A = tokenizer.encode('''ありがとう。''' , add_special_tokens=__A ) _A = tokenizer.encode('''どういたしまして。''' , add_special_tokens=__A ) _A = tokenizer.build_inputs_with_special_tokens(__A ) _A 
= tokenizer.build_inputs_with_special_tokens(__A , __A ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Union[str, Any] ) -> List[Any]: _A = '''cl-tohoku/bert-base-japanese''' _A = AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , __A ) class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Dict ) -> List[Any]: _A = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(__A ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) _A = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(__A ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
def mf_knapsack(i: int, wt: list, val: list, j: int) -> int:
    """Memoized 0/1 knapsack: best value using the first i items with capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of (1-indexed) items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the DP table backwards, adding item i whenever taking it changed the value."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
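# Worked example (our addition): with capacity 6, weights [4, 3, 2, 3] and
# values [3, 2, 4, 4], the best choice is items 3 and 4 (weight 2 + 3 <= 6,
# value 4 + 4 = 8). The demo helper name below is ours, for illustration.
def _knapsack_demo() -> None:
    value, subset = knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4])
    assert value == 8 and subset == {3, 4}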
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
def solution(n: int = 1_000_000) -> int:
    """Project Euler 14: the starting number below ``n`` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
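# Small worked example (our addition): the chain starting at 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. ten terms, which is
# the example from the Project Euler problem statement. The helper is ours.
def collatz_chain_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length


assert collatz_chain_length(13) == 10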
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are merged, so the Playfair alphabet has 25 letters
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
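# Round-trip usage example (our addition; key and message are the classic
# Wikipedia example and otherwise arbitrary). Note that prepare_input() strips
# non-letters and pads repeated letters with X, so decoding returns the
# normalised digraph form of the message, not the original text.
if __name__ == "__main__":
    key = "playfair example"
    ciphertext = encode("Hide the gold in the tree stump", key)
    print(ciphertext)
    print(decode(ciphertext, key))  # HIDETHEGOLDINTHETREXESTUMP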
def text_justification(word: str, max_width: int) -> list:
    """
    Fully justify a paragraph: every returned line is exactly ``max_width``
    characters wide, with extra spaces distributed left to right.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line,
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word, then the spaces to insert after it
                aligned_words_list.append(line[i])
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(line) = number of spaces that must go between the words so far
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
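# Example (our addition), showing the LeetCode-style behaviour: each output
# row below is exactly 16 characters wide, including trailing spaces.
if __name__ == "__main__":
    for row in text_justification("This is an example of text justification.", 16):
        print(repr(row))
    # 'This    is    an'
    # 'example  of text'
    # 'justification.  '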
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
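# Hypothetical usage sketch (our addition, not from the original file): this
# class follows the diffusers *community pipeline* pattern, so it would
# typically be loaded via `custom_pipeline=...`. The checkpoint name and the
# pipeline id below are illustrative assumptions, kept commented out because
# running them downloads model weights.
#
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "google/ddpm-ema-celebahq-256",  # assumed: any unconditional UNet + scheduler
#     custom_pipeline="ddim_noise_comparative_analysis",
# )
# images, noising_timestep = pipe(init_image, strength=0.6, return_dict=False)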
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __A ( ): '''simple docstring''' _A = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' _A = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('''RGB''' ) return image def __A ( _lowercase ): '''simple docstring''' _A = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = dct.pop(_lowercase ) _A = val def __A ( _lowercase , _lowercase ): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _A = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" ) _A = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, 
set bias in the state dict _A = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) ) _A = qkv_bias def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = 3_64 if '''coco''' in model_name else 2_24 _A = BlipaVisionConfig(image_size=_lowercase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _A = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowercase ).to_dict() elif "opt-6.7b" in model_name: _A = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowercase ).to_dict() elif "t5-xl" in model_name: _A = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _A = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() _A = BlipaConfig(vision_config=_lowercase , text_config=_lowercase ) return config, image_size @torch.no_grad() def __A ( _lowercase , _lowercase=None , _lowercase=False ): '''simple docstring''' _A = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) _A = tokenizer('''\n''' , add_special_tokens=_lowercase ).input_ids[0] _A ,_A = get_blipa_config(_lowercase , eos_token_id=_lowercase ) _A = BlipaForConditionalGeneration(_lowercase ).eval() _A = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } _A ,_A = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) _A = '''cuda''' if torch.cuda.is_available() else '''cpu''' _A ,_A ,_A = load_model_and_preprocess( name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase ) original_model.eval() print('''Done!''' ) # update state dict keys _A = original_model.state_dict() _A = create_rename_keys(_lowercase ) for src, dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _A = state_dict.pop(_lowercase ) if key.startswith('''Qformer.bert''' ): _A = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: _A = key.replace('''self''' , '''attention''' ) if "opt_proj" in key: _A = key.replace('''opt_proj''' , '''language_projection''' ) if "t5_proj" in key: _A = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''opt''' ): _A = key.replace('''opt''' , '''language''' ) if key.startswith('''t5''' ): _A = key.replace('''t5''' , '''language''' ) _A = val # read in qv biases read_in_q_v_bias(_lowercase , _lowercase ) _A ,_A = hf_model.load_state_dict(_lowercase , strict=_lowercase ) assert len(_lowercase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _A = load_demo_image() _A = vis_processors['''eval'''](_lowercase ).unsqueeze(0 ).to(_lowercase ) _A = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowercase ) # create 
processor _A = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=_lowercase , image_std=_lowercase ) _A = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase ) _A = processor(images=_lowercase , return_tensors='''pt''' ).pixel_values.to(_lowercase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowercase , _lowercase ) original_model.to(_lowercase ) hf_model.to(_lowercase ) with torch.no_grad(): if "opt" in model_name: _A = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits _A = hf_model(_lowercase , _lowercase ).logits else: _A = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits _A = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _A = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits assert original_logits.shape == logits.shape print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _A = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase ) assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _A = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase ) else: # cast to same type _A = logits.dtype assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) _A = '''''' _A = tokenizer(_lowercase , return_tensors='''pt''' ).input_ids.to(_lowercase ) _A = original_model.generate({'''image''': original_pixel_values} ) _A = hf_model.generate( _lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('''Original generation:''' , _lowercase ) _A = input_ids.shape[1] _A = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase ) _A = [text.strip() for text in output_text] print('''HF generation:''' , _lowercase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowercase ) hf_model.save_pretrained(_lowercase ) if push_to_hub: processor.push_to_hub(f"""nielsr/{model_name}""" ) hf_model.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() __A = [ 'blip2-opt-2.7b', 'blip2-opt-6.7b', 'blip2-opt-2.7b-coco', 'blip2-opt-6.7b-coco', 'blip2-flan-t5-xl', 'blip2-flan-t5-xl-coco', 'blip2-flan-t5-xxl', ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) __A = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to ``n``."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    # sieve the first segment [2, sqrt(n)] directly
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range one segment at a time,
    # marking multiples of the small primes found above
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
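# Cross-check (our addition): the segmented sieve should agree with naive
# trial division on a small range. The helper name is ours, for illustration.
def _is_prime_naive(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(math.sqrt(n)) + 1))


assert sieve(100) == [n for n in range(2, 101) if _is_prime_naive(n)]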
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[Any] , __A: Tuple , __A: Optional[Any]=14 , __A: Optional[Any]=7 , __A: Optional[Any]=True , __A: Optional[int]=True , __A: str=False , __A: Any=True , __A: Optional[Any]=99 , __A: Optional[Any]=32 , __A: Optional[int]=4 , __A: List[Any]=4 , __A: Optional[int]=4 , __A: Dict=37 , __A: Union[str, Any]="gelu" , __A: Any=0.1 , __A: Optional[int]=0.1 , __A: List[Any]=5_12 , __A: List[str]=0.02 , ) -> List[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = rotary_dim _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = initializer_range _A = None _A = vocab_size - 1 _A = vocab_size - 1 _A = vocab_size - 1 def __A ( self: str ) -> Any: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def __A ( self: Any ) -> List[str]: _A = self.prepare_config_and_inputs() _A ,_A ,_A = config_and_inputs _A = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __A ( self: Dict , __A: int , __A: Tuple , __A: int , __A: int ) -> Dict: _A = 20 _A = model_class_name(__A ) _A = model.init_cache(input_ids.shape[0] , __A ) _A = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) _A = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _A = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) _A = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) _A = model( input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , ) _A = model(__A ) _A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def __A ( self: Any , __A: Optional[Any] , __A: Any , __A: Optional[int] , __A: Dict ) -> str: _A = 20 _A = model_class_name(__A ) _A = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) _A = 
model.init_cache(input_ids.shape[0] , __A ) _A = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _A = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) _A = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) _A = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , ) _A = model(__A , attention_mask=__A ) _A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () A_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def __A ( self: Dict ) -> Dict: _A = FlaxGPTJModelTester(self ) def __A ( self: Any ) -> Optional[Any]: for model_class_name in self.all_model_classes: _A ,_A ,_A = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__A , __A , __A , __A ) def __A ( self: int ) -> Union[str, Any]: for model_class_name in self.all_model_classes: _A ,_A ,_A = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __A , __A , __A , __A ) @tooslow def __A ( self: str ) -> Optional[Any]: _A = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) _A = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=__A , truncation=__A ) _A = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) _A = False _A = model.config.eos_token_id _A = jax.jit(model.generate ) _A = jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences _A = tokenizer.batch_decode(__A , skip_special_tokens=__A ) _A = [ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. 
I\'m going to''', ] self.assertListEqual(__A , __A ) @is_pt_flax_cross_test def __A ( self: Optional[Any] ) -> Union[str, Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _A = self._prepare_for_class(__A , __A ) _A = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _A = model_class.__name__[4:] # Skip the "Flax" at the beginning _A = getattr(__A , __A ) _A ,_A = pt_inputs['''input_ids'''].shape _A = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): _A = 0 _A = 1 _A = 0 _A = 1 _A = pt_model_class(__A ).eval() _A = model_class(__A , dtype=jnp.floataa ) _A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A ) _A = fx_state with torch.no_grad(): _A = pt_model(**__A ).to_tuple() _A = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__A ) _A = model_class.from_pretrained(__A , from_pt=__A ) _A = fx_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def __A ( self: List[Any] ) -> List[str]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _A = self._prepare_for_class(__A , __A ) _A = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _A = model_class.__name__[4:] # Skip the "Flax" at the beginning _A = getattr(__A , __A ) _A = pt_model_class(__A ).eval() _A = model_class(__A , dtype=jnp.floataa ) _A = load_flax_weights_in_pytorch_model(__A , fx_model.params ) _A ,_A = pt_inputs['''input_ids'''].shape _A = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): _A = 0 _A = 1 _A = 0 _A = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): _A = pt_model(**__A ).to_tuple() _A = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__A ) _A = pt_model_class.from_pretrained(__A , from_flax=__A ) with torch.no_grad(): _A = pt_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def __A ( self: Tuple ) -> Tuple: for model_class_name in self.all_model_classes: _A = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) _A = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
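# --- Sketch: the incremental-decoding pattern the two cache checks above exercise ---
# Assumes transformers with Flax extras installed; any Flax causal-LM checkpoint
# works, "EleutherAI/gpt-j-6B" is simply the one used in the slow tests (and is large).
import jax.numpy as jnp
from transformers import FlaxGPTJForCausalLM

model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
input_ids = jnp.ones((1, 4), dtype="i4")
past = model.init_cache(1, 8)                      # pre-allocate a KV cache of length 8
position_ids = jnp.arange(4)[None, :]
outputs = model(input_ids, past_key_values=past, position_ids=position_ids)
# subsequent tokens are fed one at a time, reusing outputs.past_key_values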
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
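# --- Sketch: the 2x nearest-neighbour upsample used by the first block above ---
# Pure JAX, no flax required; shapes are NHWC as in the blocks above.
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 3))
batch, height, width, channels = x.shape
y = jax.image.resize(x, shape=(batch, height * 2, width * 2, channels), method="nearest")
assert y.shape == (1, 16, 16, 3)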
import comet # From: unbabel-comet import torch import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n' __A = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n' __A = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def __A ( self: Tuple ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence''' ), '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def __A ( self: Union[str, Any] , __A: str ) -> Dict: if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def __A ( self: Tuple , __A: str , __A: Union[str, Any] , __A: Union[str, Any] , __A: Any=None , __A: Optional[int]=False ) -> Dict: if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {'''src''': sources, '''mt''': predictions, '''ref''': references} _A = [dict(zip(__A , __A ) ) for t in zip(*data.values() )] _A ,_A = self.scorer.predict(__A , gpus=__A , progress_bar=__A ) return {"mean_score": mean_score, "scores": scores}
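# --- Sketch: the columnar-to-row conversion performed in the compute method above ---
# Pure Python; the sample strings are illustrative.
data = {"src": ["Dem Feuer ..."], "mt": ["The fire ..."], "ref": ["They were ..."]}
rows = [dict(zip(data, t)) for t in zip(*data.values())]
assert rows == [{"src": "Dem Feuer ...", "mt": "The fire ...", "ref": "They were ..."}]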
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
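# --- Illustrative extra call (not in the original file) ---
# A 3-cycle never empties its in-degrees, so Kahn's algorithm dequeues nothing
# and the cycle branch fires.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"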
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = AudioLDMPipeline A_ = TEXT_TO_AUDIO_PARAMS A_ = TEXT_TO_AUDIO_BATCH_PARAMS A_ = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def __A ( self: List[Any] ) -> Dict: torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__A , ) _A = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _A = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , ) _A = ClapTextModelWithProjection(__A ) _A = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) _A = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__A , ) _A = SpeechTaHifiGan(__A ) _A = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def __A ( self: Optional[int] , __A: List[str] , __A: List[str]=0 ) -> Tuple: if str(__A ).startswith('''mps''' ): _A = torch.manual_seed(__A ) else: _A = torch.Generator(device=__A ).manual_seed(__A ) _A = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def __A ( self: int ) -> Dict: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A ) _A = audioldm_pipe(**__A ) _A = output.audios[0] assert audio.ndim == 1 assert len(__A ) == 2_56 _A = audio[:10] _A = np.array( [-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, 
-0.0_028, 0.0_033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def __A ( self: Optional[Any] ) -> Tuple: _A = self.get_dummy_components() _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A ) _A = 3 * [inputs['''prompt''']] # forward _A = audioldm_pipe(**__A ) _A = output.audios[0] _A = self.get_dummy_inputs(__A ) _A = 3 * [inputs.pop('''prompt''' )] _A = audioldm_pipe.tokenizer( __A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__A , return_tensors='''pt''' , ) _A = text_inputs['''input_ids'''].to(__A ) _A = audioldm_pipe.text_encoder( __A , ) _A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _A = F.normalize(__A , dim=-1 ) _A = prompt_embeds # forward _A = audioldm_pipe(**__A ) _A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def __A ( self: Union[str, Any] ) -> Optional[Any]: _A = self.get_dummy_components() _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A ) _A = 3 * ['''this is a negative prompt'''] _A = negative_prompt _A = 3 * [inputs['''prompt''']] # forward _A = audioldm_pipe(**__A ) _A = output.audios[0] _A = self.get_dummy_inputs(__A ) _A = 3 * [inputs.pop('''prompt''' )] _A = [] for p in [prompt, negative_prompt]: _A = audioldm_pipe.tokenizer( __A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__A , return_tensors='''pt''' , ) _A = text_inputs['''input_ids'''].to(__A ) _A = audioldm_pipe.text_encoder( __A , ) _A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _A = F.normalize(__A , dim=-1 ) embeds.append(__A ) _A ,_A = embeds # forward _A = audioldm_pipe(**__A ) _A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def __A ( self: int ) -> str: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = PNDMScheduler(skip_prk_steps=__A ) _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A ) _A = '''egg cracking''' _A = audioldm_pipe(**__A , negative_prompt=__A ) _A = output.audios[0] assert audio.ndim == 1 assert len(__A ) == 2_56 _A = audio[:10] _A = np.array( [-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def __A ( self: Optional[Any] ) -> Union[str, Any]: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = PNDMScheduler(skip_prk_steps=__A ) _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) _A = audioldm_pipe(__A , num_inference_steps=2 ).audios assert audios.shape == (1, 2_56) # test num_waveforms_per_prompt=1 (default) for batch of prompts _A = 2 _A = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_56) # test num_waveforms_per_prompt for single prompt _A = 2 _A = audioldm_pipe(__A , num_inference_steps=2 , num_waveforms_per_prompt=__A ).audios assert audios.shape == (num_waveforms_per_prompt, 2_56) # test 
num_waveforms_per_prompt for batch of prompts _A = 2 _A = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__A ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56) def __A ( self: Union[str, Any] ) -> Any: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = audioldm_pipe.vocoder.config.sampling_rate _A = self.get_dummy_inputs(__A ) _A = audioldm_pipe(audio_length_in_s=0.016 , **__A ) _A = output.audios[0] assert audio.ndim == 1 assert len(__A ) / vocoder_sampling_rate == 0.016 _A = audioldm_pipe(audio_length_in_s=0.032 , **__A ) _A = output.audios[0] assert audio.ndim == 1 assert len(__A ) / vocoder_sampling_rate == 0.032 def __A ( self: str ) -> Any: _A = self.get_dummy_components() _A = AudioLDMPipeline(**__A ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = ['''hey'''] _A = audioldm_pipe(__A , num_inference_steps=1 ) _A = output.audios.shape assert audio_shape == (1, 2_56) _A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 _A = SpeechTaHifiGan(__A ).to(__A ) _A = audioldm_pipe(__A , num_inference_steps=1 ) _A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_56) def __A ( self: Union[str, Any] ) -> Optional[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__A ) def __A ( self: int ) -> Union[str, Any]: self._test_inference_batch_single_identical(test_mean_pixel_difference=__A ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __A ( self: Dict ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A ) @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: List[str] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self: int , __A: Optional[Any] , __A: str="cpu" , __A: Dict=torch.floataa , __A: Union[str, Any]=0 ) -> Any: _A = torch.Generator(device=__A ).manual_seed(__A ) _A = np.random.RandomState(__A ).standard_normal((1, 8, 1_28, 16) ) _A = torch.from_numpy(__A ).to(device=__A , dtype=__A ) _A = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def __A ( self: Any ) -> int: _A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = self.get_inputs(__A ) _A = 25 _A = audioldm_pipe(**__A ).audios[0] assert audio.ndim == 1 assert len(__A ) == 8_19_20 _A = audio[7_72_30:7_72_40] _A = np.array( [-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] ) _A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def __A ( self: Optional[Any] ) -> str: _A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) _A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _A = audioldm_pipe.to(__A ) audioldm_pipe.set_progress_bar_config(disable=__A ) _A = self.get_inputs(__A ) _A = audioldm_pipe(**__A ).audios[0] assert audio.ndim == 1 assert len(__A ) == 8_19_20 _A = 
audio[2_77_80:2_77_90] _A = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] ) _A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
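# --- Sketch: the deterministic-latents pattern used by the slow tests above ---
# Seed NumPy, then hand the array to torch, so the latents are reproducible
# across runs and devices; the shape matches get_inputs above.
import numpy as np
import torch

latents = np.random.RandomState(0).standard_normal((1, 8, 128, 16))
latents = torch.from_numpy(latents).to(device="cpu", dtype=torch.float32)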
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = 1 @register_to_config def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]: # set `betas`, `alphas`, `timesteps` self.set_timesteps(__A ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int: _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(__A ) _A = [] def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__A ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(__A , __A , __A , __A ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor: return sample def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]: _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(__A , 1e-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[str] ) -> Dict: return self.config.num_train_timesteps
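# --- Sketch: the Adams-Bashforth blend used in `step` above, as a plain function ---
# `ets` holds the stored model outputs, most recent last; the branches mirror
# the 1st- through 4th-order cases in the scheduler.
def multistep_combination(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24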
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ): """simple docstring""" A_ = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self: int , __A: int , __A: int , __A: Optional[int] = None , __A: int = 5_02_57 , __A: int = 10_24 , __A: int = 7_68 , __A: int = 12 , __A: int = 12 , __A: Optional[int] = None , __A: str = "gelu_new" , __A: float = 0.1 , __A: float = 0.1 , __A: float = 0.1 , __A: float = 1e-5 , __A: float = 0.02 , __A: bool = True , __A: bool = True , __A: bool = False , __A: bool = False , ) -> Union[str, Any]: super().__init__() _A = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) _A = prefix_inner_dim _A = prefix_hidden_dim _A = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) _A = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) _A = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) _A = GPTaLMHeadModel(__A ) def __A ( self: Any , __A: torch.Tensor , __A: torch.Tensor , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , ) -> List[Any]: _A = self.transformer.transformer.wte(__A ) _A = self.encode_prefix(__A ) _A = self.decode_prefix(__A ) _A = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: _A = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) _A = torch.cat((dummy_token, input_ids) , dim=1 ) _A = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __A ( self: Union[str, Any] , __A: int , __A: torch.device ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Any: return self.encode_prefix(__A ) @torch.no_grad() def __A ( self: Any , __A: Dict , __A: Tuple , __A: str ) -> Tuple: _A = torch.split(__A , 1 , dim=0 ) _A = [] _A = [] for feature in features: _A = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now _A ,_A = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) _A = torch.stack(__A ) _A = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __A ( self: str , __A: str=None , __A: Any=None , __A: Dict=None , __A: int = 5 , __A: int = 67 , __A: float = 1.0 , __A: Optional[int] = None , ) -> Optional[Any]: _A = eos_token_id _A = None _A = None _A = torch.ones(__A , device=__A , dtype=torch.int ) _A = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: _A = input_embeds else: _A 
= self.transformer.transformer.wte(__A ) for i in range(__A ): _A = self.transformer(inputs_embeds=__A ) _A = outputs.logits _A = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) _A = logits.softmax(-1 ).log() if scores is None: _A ,_A = logits.topk(__A , -1 ) _A = generated.expand(__A , *generated.shape[1:] ) _A ,_A = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: _A = next_tokens else: _A = tokens.expand(__A , *tokens.shape[1:] ) _A = torch.cat((tokens, next_tokens) , dim=1 ) else: _A = -float(np.inf ) _A = 0 _A = scores[:, None] + logits seq_lengths[~is_stopped] += 1 _A = scores_sum / seq_lengths[:, None] _A ,_A = scores_sum_average.view(-1 ).topk(__A , -1 ) _A = next_tokens // scores_sum.shape[1] _A = seq_lengths[next_tokens_source] _A = next_tokens % scores_sum.shape[1] _A = next_tokens.unsqueeze(1 ) _A = tokens[next_tokens_source] _A = torch.cat((tokens, next_tokens) , dim=1 ) _A = generated[next_tokens_source] _A = scores_sum_average * seq_lengths _A = is_stopped[next_tokens_source] _A = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) _A = torch.cat((generated, next_token_embed) , dim=1 ) _A = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break _A = scores / seq_lengths _A = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length _A = [tokens[i] for i in order] _A = torch.stack(__A , dim=0 ) _A = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
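# --- Sketch: the length-normalised beam scoring used in generate_beam above ---
# Cumulative log-probs are divided by hypothesis length before ranking, so
# longer beams are not unfairly penalised; the numbers are illustrative.
import torch

scores_sum = torch.tensor([-2.0, -6.0, -4.5])  # cumulative log-probs of 3 candidates
seq_lengths = torch.tensor([2.0, 6.0, 3.0])
scores_avg = scores_sum / seq_lengths          # -1.0, -1.0, -1.5
best_scores, best_idx = scores_avg.topk(2, -1)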
def depth_first_search(grid, row, col, visit):
    """Count the simple paths from (row, col) to the bottom-right cell of `grid`,
    moving in the four cardinal directions and never entering blocked (1) cells."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
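# --- Illustrative call (grid and expected count are not in the original file) ---
# In a 3x3 grid with the centre blocked, exactly two simple paths ring the obstacle.
grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
print(depth_first_search(grid, 0, 0, set()))  # 2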
__A = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
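# --- Usage sketch (the dataclass is hypothetical; in upstream transformers the ---
# --- method defined above is parse_args_into_dataclasses) ---
from dataclasses import dataclass


@dataclass
class TrainArgs:
    learning_rate: float = 3e-4
    do_eval: bool = False


parser = HfArgumentParser(TrainArgs)
(train_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_eval"])
assert train_args.learning_rate == 1e-4 and train_args.do_eval is True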
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __A = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: Optional[Any] , __A: AutoencoderKL , __A: CLIPTextModel , __A: CLIPTokenizer , __A: UNetaDConditionModel , __A: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __A: StableDiffusionSafetyChecker , __A: CLIPImageProcessor , ) -> Optional[Any]: super().__init__() self.register_modules( vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , scheduler=__A , safety_checker=__A , feature_extractor=__A , ) def __A ( self: List[str] , __A: Optional[Union[str, int]] = "auto" ) -> Union[str, Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__A ) def __A ( self: List[str] ) -> Tuple: self.enable_attention_slicing(__A ) @torch.no_grad() def __call__( self: Dict , __A: Union[str, List[str]] , __A: int = 5_12 , __A: int = 5_12 , __A: int = 50 , __A: float = 7.5 , __A: Optional[Union[str, List[str]]] = None , __A: Optional[int] = 1 , __A: float = 0.0 , __A: Optional[torch.Generator] = None , __A: Optional[torch.FloatTensor] = None , __A: Optional[str] = "pil" , __A: bool = True , __A: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A: int = 1 , __A: Optional[torch.FloatTensor] = None , **__A: List[str] , ) -> int: if isinstance(__A , __A ): _A = 1 elif isinstance(__A , __A ): _A = len(__A ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__A )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__A )}.""" ) # get prompt text embeddings _A = self.tokenizer( __A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) _A = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _A ,_A ,_A = text_embeddings.shape _A = text_embeddings.repeat(1 , __A , 1 ) _A = text_embeddings.view(bs_embed * num_images_per_prompt , __A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen 
paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _A = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _A = 42 if negative_prompt is None: _A = [''''''] elif type(__A ) is not type(__A ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !=""" f""" {type(__A )}.""" ) elif isinstance(__A , __A ): _A = [negative_prompt] elif batch_size != len(__A ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''' ) else: _A = negative_prompt _A = text_input_ids.shape[-1] _A = self.tokenizer( __A , padding='''max_length''' , max_length=__A , truncation=__A , return_tensors='''pt''' , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = uncond_embeddings.shape[1] _A = uncond_embeddings.repeat(__A , __A , 1 ) _A = uncond_embeddings.view(batch_size * num_images_per_prompt , __A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. _A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) _A = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _A = torch.randn( __A , generator=__A , device='''cpu''' , dtype=__A ).to(self.device ) _A = torch.randn(__A , generator=__A , device='''cpu''' , dtype=__A ).to( self.device ) else: _A = torch.randn( __A , generator=__A , device=self.device , dtype=__A ) _A = torch.randn(__A , generator=__A , device=self.device , dtype=__A ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _A = latents_reference.to(self.device ) _A = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images _A = (latents_shape[3] - latents_shape_reference[3]) // 2 _A = (latents_shape[2] - latents_shape_reference[2]) // 2 _A = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx _A = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy _A = 0 if dx < 0 else dx _A = 0 if dy < 0 else dy _A = max(-dx , 0 ) _A = max(-dy , 0 ) # import pdb # pdb.set_trace() _A = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _A = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _A = latents * self.scheduler.init_noise_sigma 
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _A = {} if accepts_eta: _A = eta for i, t in enumerate(self.progress_bar(__A ) ): # expand the latents if we are doing classifier free guidance _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A = self.scheduler.scale_model_input(__A , __A ) # predict the noise residual _A = self.unet(__A , __A , encoder_hidden_states=__A ).sample # perform guidance if do_classifier_free_guidance: _A ,_A = noise_pred.chunk(2 ) _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__A , __A , __A , **__A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__A , __A , __A ) _A = 1 / 0.18_215 * latents _A = self.vae.decode(__A ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: _A = self.feature_extractor(self.numpy_to_pil(__A ) , return_tensors='''pt''' ).to( self.device ) _A ,_A = self.safety_checker( images=__A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: _A = None if output_type == "pil": _A = self.numpy_to_pil(__A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
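# --- Sketch: the classifier-free-guidance step inside the denoising loop above ---
# The prediction is pushed away from the unconditional branch and toward the
# text-conditioned branch, scaled by guidance_scale; tensors are illustrative.
import torch

noise_pred = torch.randn(2, 4, 64, 64)                  # [uncond | text] batch
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)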
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

    if is_vision_available():
        from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str:
        _A = parent
        _A = batch_size
        _A = is_training
        _A = use_auxiliary_loss
        _A = num_queries
        _A = num_channels
        _A = min_size
        _A = max_size
        _A = num_labels
        _A = mask_feature_size

    def __A ( self: Dict ) -> Optional[int]:
        _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __A )

        _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )

        _A = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
        ).float()

        _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()

        _A = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def __A ( self: Optional[Any] ) -> Tuple:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) ,
            decoder_config=DetrConfig(
                decoder_ffn_dim=1_28 ,
                num_queries=self.num_queries ,
                decoder_attention_heads=2 ,
                d_model=self.mask_feature_size ,
            ) ,
            mask_feature_size=self.mask_feature_size ,
            fpn_feature_size=self.mask_feature_size ,
            num_channels=self.num_channels ,
            num_labels=self.num_labels ,
        )

    def __A ( self: Dict ) -> Tuple:
        _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs()
        _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int:
        _A = output.encoder_hidden_states
        _A = output.pixel_decoder_hidden_states
        _A = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers )

    def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any:
        with torch.no_grad():
            _A = MaskFormerModel(config=__A )
            model.to(__A )
            model.eval()

            _A = model(pixel_values=__A , pixel_mask=__A )
            _A = model(__A , output_hidden_states=__A )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,
            (self.batch_size, self.num_queries, self.mask_feature_size) ,
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(__A , __A )

    def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int:
        _A = MaskFormerForInstanceSegmentation(config=__A )
        model.to(__A )
        model.eval()

        def comm_check_on_output(__A: int ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _A = model(pixel_values=__A , pixel_mask=__A )
            _A = model(__A )

        comm_check_on_output(__A )

        _A = model(
            pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )

        comm_check_on_output(__A )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
    """simple docstring"""

    A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    A_ = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    A_ = False
    A_ = False
    A_ = False
    A_ = False

    def __A ( self: int ) -> Tuple:
        _A = MaskFormerModelTester(self )
        _A = ConfigTester(self , config_class=__A , has_text_modality=__A )

    def __A ( self: List[Any] ) -> Dict:
        self.config_tester.run_common_tests()

    def __A ( self: Optional[Any] ) -> int:
        _A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )

    def __A ( self: Dict ) -> Optional[Any]:
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def __A ( self: int ) -> Tuple:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def __A ( self: List[Any] ) -> Any:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def __A ( self: Union[str, Any] ) -> Optional[int]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def __A ( self: int ) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def __A ( self: Union[str, Any] ) -> List[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __A ( self: List[Any] ) -> Any:
        pass

    def __A ( self: Dict ) -> Optional[Any]:
        _A ,_A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = model_class(__A )
            _A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _A = [*signature.parameters.keys()]

            _A = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __A )

    @slow
    def __A ( self: int ) -> Optional[Any]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _A = MaskFormerModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    def __A ( self: Optional[Any] ) -> Optional[int]:
        _A = (self.model_tester.min_size,) * 2
        _A = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__A ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=__A ),
            '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(),
        }

        _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
        _A = model(**__A )
        self.assertTrue(outputs.loss is not None )

    def __A ( self: Optional[Any] ) -> List[Any]:
        _A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )

    def __A ( self: Any ) -> Tuple:
        _A ,_A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = model_class(__A ).to(__A )
            _A = model(**__A , output_attentions=__A )
            self.assertTrue(outputs.attentions is not None )

    def __A ( self: Dict ) -> Union[str, Any]:
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        _A = self.all_model_classes[1]
        _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()

        _A = model_class(__A )
        model.to(__A )
        model.train()

        _A = model(__A , mask_labels=__A , class_labels=__A ).loss
        loss.backward()

    def __A ( self: Tuple ) -> Optional[Any]:
        # only MaskFormerForInstanceSegmentation has the loss
        _A = self.all_model_classes[1]
        _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
        _A = True
        _A = True

        _A = model_class(__A )
        model.to(__A )
        model.train()

        _A = model(__A , mask_labels=__A , class_labels=__A )

        _A = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        _A = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        _A = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        _A = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=__A )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


__A = 1e-4


def __A ( ):
    '''simple docstring'''
    _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def __A ( self: Union[str, Any] ) -> Optional[int]:
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    def __A ( self: List[Any] ) -> Any:
        _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(__A , return_tensors='''pt''' ).to(__A )
        _A = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A , (1, 3, 8_00, 10_88) )

        with torch.no_grad():
            _A = model(**__A )

        _A = torch.tensor(
            [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )

        _A = torch.tensor(
            [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )

        _A = torch.tensor(
            [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )

    def __A ( self: Dict ) -> Dict:
        _A = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__A )
            .eval()
        )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(__A , return_tensors='''pt''' ).to(__A )
        _A = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A , (1, 3, 8_00, 10_88) )

        with torch.no_grad():
            _A = model(**__A )

        # masks_queries_logits
        _A = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,
        )
        _A = [
            [-1.3_737_124, -1.7_724_937, -1.9_364_233],
            [-1.5_977_281, -1.9_867_939, -2.1_523_695],
            [-1.5_795_398, -1.9_269_832, -2.093_942],
        ]
        _A = torch.tensor(__A ).to(__A )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )

        # class_queries_logits
        _A = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _A = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ] ).to(__A )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )

    def __A ( self: List[Any] ) -> Dict:
        _A = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__A )
            .eval()
        )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(__A , return_tensors='''pt''' ).to(__A )
        _A = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A , (1, 3, 8_00, 10_88) )

        with torch.no_grad():
            _A = model(**__A )

        # masks_queries_logits
        _A = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,
        )
        _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
        _A = torch.tensor(__A ).to(__A )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )

        # class_queries_logits
        _A = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _A = torch.tensor(
            [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )

    def __A ( self: Optional[Any] ) -> str:
        _A = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__A )
            .eval()
        )
        _A = self.default_image_processor

        _A = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] ,
            segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] ,
            return_tensors='''pt''' ,
        )

        _A = inputs['''pixel_values'''].to(__A )
        _A = [el.to(__A ) for el in inputs['''mask_labels''']]
        _A = [el.to(__A ) for el in inputs['''class_labels''']]

        with torch.no_grad():
            _A = model(**__A )

        self.assertTrue(outputs.loss is not None )
62
1
from __future__ import annotations

from math import pi


def __A ( _lowercase , _lowercase , _lowercase ):
    '''simple docstring'''
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''' )
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''' )
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''' )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('''Exactly one argument must be 0''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
700
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


__A = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
        _A = question_encoder
        _A = generator
        _A = self.question_encoder

    def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
        if os.path.isfile(__A ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(__A , exist_ok=__A )
        _A = os.path.join(__A , '''question_encoder_tokenizer''' )
        _A = os.path.join(__A , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(__A )
        self.generator.save_pretrained(__A )

    @classmethod
    def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        _A = kwargs.pop('''config''' , __A )
        if config is None:
            _A = RagConfig.from_pretrained(__A )

        _A = AutoTokenizer.from_pretrained(
            __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        _A = AutoTokenizer.from_pretrained(
            __A , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=__A , generator=__A )

    def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
        return self.current_tokenizer(*__A , **__A )

    def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
        return self.generator.batch_decode(*__A , **__A )

    def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
        return self.generator.decode(*__A , **__A )

    def __A ( self: Dict ) -> List[str]:
        _A = self.question_encoder

    def __A ( self: Union[str, Any] ) -> int:
        _A = self.generator

    def __A (
        self: Dict ,
        __A: List[str] ,
        __A: Optional[List[str]] = None ,
        __A: Optional[int] = None ,
        __A: Optional[int] = None ,
        __A: str = "longest" ,
        __A: str = None ,
        __A: bool = True ,
        **__A: Tuple ,
    ) -> BatchEncoding:
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' ,
            __A ,
        )
        if max_length is None:
            _A = self.current_tokenizer.model_max_length
        _A = self(
            __A ,
            add_special_tokens=__A ,
            return_tensors=__A ,
            max_length=__A ,
            padding=__A ,
            truncation=__A ,
            **__A ,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            _A = self.current_tokenizer.model_max_length
        _A = self(
            text_target=__A ,
            add_special_tokens=__A ,
            return_tensors=__A ,
            padding=__A ,
            max_length=__A ,
            truncation=__A ,
            **__A ,
        )
        _A = labels['''input_ids''']
        return model_inputs
62
0