code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __A : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> str: super().__init__() self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , )-> Union[AudioPipelineOutput, Tuple]: if audio_length_in_s is None: lowerCamelCase_ =self.unet.config.sample_size / self.unet.config.sample_rate lowerCamelCase_ =audio_length_in_s * self.unet.config.sample_rate lowerCamelCase_ =2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to' f' {3 * down_scale_factor / self.unet.config.sample_rate}.' ) lowerCamelCase_ =int(_SCREAMING_SNAKE_CASE ) if sample_size % down_scale_factor != 0: lowerCamelCase_ =( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled' f' by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising' """ process.""" ) lowerCamelCase_ =int(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =next(iter(self.unet.parameters() ) ).dtype lowerCamelCase_ =(batch_size, self.unet.config.in_channels, sample_size) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) lowerCamelCase_ =randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE ) # set step values self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=audio.device ) lowerCamelCase_ =self.scheduler.timesteps.to(_SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase_ =self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample # 2. compute previous image: x_t -> t_t-1 lowerCamelCase_ =self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample lowerCamelCase_ =audio.clamp(-1 , 1 ).float().cpu().numpy() lowerCamelCase_ =audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
75
import os from datetime import datetime as dt from github import Github __A : Optional[int] = [ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def __UpperCamelCase ( ) ->Dict: """simple docstring""" lowerCamelCase_ =Github(os.environ["""GITHUB_TOKEN"""] ) lowerCamelCase_ =g.get_repo("""huggingface/diffusers""" ) lowerCamelCase_ =repo.get_issues(state="""open""" ) for issue in open_issues: lowerCamelCase_ =sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A ) lowerCamelCase_ =comments[0] if len(_A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
75
1
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig __A : Dict = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring __A : int = 'UperNetConfig' class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 1 , )-> None: super().__init__() lowerCamelCase_ =nn.Convad( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE , ) lowerCamelCase_ =nn.BatchNormad(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =nn.ReLU() def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor: lowerCamelCase_ =self.conv(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.batch_norm(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.activation(_SCREAMING_SNAKE_CASE ) return output class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None: super().__init__() lowerCamelCase_ =[ nn.AdaptiveAvgPoolad(_SCREAMING_SNAKE_CASE ), UperNetConvModule(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor: lowerCamelCase_ =input for layer in self.layers: lowerCamelCase_ 
=layer(_SCREAMING_SNAKE_CASE ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None: super().__init__() lowerCamelCase_ =pool_scales lowerCamelCase_ =align_corners lowerCamelCase_ =in_channels lowerCamelCase_ =channels lowerCamelCase_ =[] for i, pool_scale in enumerate(_SCREAMING_SNAKE_CASE ): lowerCamelCase_ =UperNetPyramidPoolingBlock(pool_scale=_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , channels=_SCREAMING_SNAKE_CASE ) self.blocks.append(_SCREAMING_SNAKE_CASE ) self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[torch.Tensor]: lowerCamelCase_ =[] for ppm in self.blocks: lowerCamelCase_ =ppm(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =nn.functional.interpolate( _SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners ) ppm_outs.append(_SCREAMING_SNAKE_CASE ) return ppm_outs class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict: super().__init__() lowerCamelCase_ =config lowerCamelCase_ =config.pool_scales # e.g. 
(1, 2, 3, 6) lowerCamelCase_ =in_channels lowerCamelCase_ =config.hidden_size lowerCamelCase_ =False lowerCamelCase_ =nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module lowerCamelCase_ =UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) lowerCamelCase_ =UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module lowerCamelCase_ =nn.ModuleList() lowerCamelCase_ =nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowerCamelCase_ =UperNetConvModule(_SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 ) lowerCamelCase_ =UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(_SCREAMING_SNAKE_CASE ) self.fpn_convs.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def _snake_case ( self )-> List[str]: self.apply(self._init_weights ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Dict: if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ =inputs[-1] lowerCamelCase_ =[x] psp_outs.extend(self.psp_modules(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =torch.cat(_SCREAMING_SNAKE_CASE , dim=1 ) lowerCamelCase_ =self.bottleneck(_SCREAMING_SNAKE_CASE ) return output def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor: # build laterals lowerCamelCase_ =[lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(_SCREAMING_SNAKE_CASE ) ) # build top-down path lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) for i in range(used_backbone_levels - 1 , 0 , 
-1 ): lowerCamelCase_ =laterals[i - 1].shape[2:] lowerCamelCase_ =laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=_SCREAMING_SNAKE_CASE , mode="""bilinear""" , align_corners=self.align_corners ) # build outputs lowerCamelCase_ =[self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowerCamelCase_ =nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners ) lowerCamelCase_ =torch.cat(_SCREAMING_SNAKE_CASE , dim=1 ) lowerCamelCase_ =self.fpn_bottleneck(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.classifier(_SCREAMING_SNAKE_CASE ) return output class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 1 )-> None: super().__init__() lowerCamelCase_ =config lowerCamelCase_ =config.auxiliary_in_channels lowerCamelCase_ =config.auxiliary_channels lowerCamelCase_ =config.auxiliary_num_convs lowerCamelCase_ =config.auxiliary_concat_input lowerCamelCase_ =in_index lowerCamelCase_ =(kernel_size // 2) * dilation lowerCamelCase_ =[] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) ) if self.num_convs == 0: lowerCamelCase_ =nn.Identity() else: lowerCamelCase_ =nn.Sequential(*_SCREAMING_SNAKE_CASE ) if self.concat_input: lowerCamelCase_ =UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 ) lowerCamelCase_ =nn.Convad(self.channels , config.num_labels , kernel_size=1 ) 
def _snake_case ( self )-> str: self.apply(self._init_weights ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[Any]: if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor: # just take the relevant feature maps lowerCamelCase_ =encoder_hidden_states[self.in_index] lowerCamelCase_ =self.convs(_SCREAMING_SNAKE_CASE ) if self.concat_input: lowerCamelCase_ =self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) lowerCamelCase_ =self.classifier(_SCREAMING_SNAKE_CASE ) return output class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Tuple = UperNetConfig _UpperCamelCase:Tuple = "pixel_values" _UpperCamelCase:Tuple = True def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Tuple: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def _snake_case ( self )-> Union[str, Any]: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Optional[int]: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =value __A : List[str] = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __A : List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." 
, lowerCAmelCase__ , ) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE )-> Dict: super().__init__(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowerCamelCase_ =UperNetHead(_SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels ) lowerCamelCase_ =UperNetFCNHead(_SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) ) @replace_return_docstrings(output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC ) def _snake_case ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )-> Union[tuple, SemanticSegmenterOutput]: lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase_ =output_attentions if output_attentions is not None else self.config.output_attentions lowerCamelCase_ =self.backbone.forward_with_filtered_kwargs( _SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =outputs.feature_maps lowerCamelCase_ =self.decode_head(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =nn.functional.interpolate(_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =None if self.auxiliary_head is not None: lowerCamelCase_ =self.auxiliary_head(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =nn.functional.interpolate( _SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =None if labels is not None: if 
self.config.num_labels == 1: raise ValueError("""The number of labels should be greater than one""" ) else: # compute weighted loss lowerCamelCase_ =CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowerCamelCase_ =loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowerCamelCase_ =(logits,) + outputs[1:] else: lowerCamelCase_ =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
75
import argparse import os import re __A : Optional[Any] = 'src/diffusers' # Pattern that looks at the indentation in a line. __A : int = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. __A : Dict = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __A : Optional[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. __A : int = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __A : Optional[Any] = re.compile(R'\[([^\]]+)\]') def __UpperCamelCase ( _A : int ) ->Dict: """simple docstring""" lowerCamelCase_ =_re_indent.search(_A ) return "" if search is None else search.groups()[0] def __UpperCamelCase ( _A : Optional[Any] , _A : Optional[int]="" , _A : int=None , _A : List[str]=None ) ->List[Any]: """simple docstring""" lowerCamelCase_ =0 lowerCamelCase_ =code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 lowerCamelCase_ =["""\n""".join(lines[:index] )] else: lowerCamelCase_ =[] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCamelCase_ =[lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(_A ) ) if index < len(_A ) - 1: lowerCamelCase_ =[lines[index + 1]] index += 1 else: lowerCamelCase_ =[] else: blocks.append("""\n""".join(_A ) ) lowerCamelCase_ =[lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append("""\n""".join(_A ) ) # Add final block after end_prompt if provided. 
if end_prompt is not None and index < len(_A ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def __UpperCamelCase ( _A : Optional[int] ) ->Optional[int]: """simple docstring""" def _inner(_A : Optional[Any] ): return key(_A ).lower().replace("""_""" , """""" ) return _inner def __UpperCamelCase ( _A : int , _A : List[Any]=None ) ->List[str]: """simple docstring""" # If no key is provided, we use a noop. def noop(_A : List[str] ): return x if key is None: lowerCamelCase_ =noop # Constants are all uppercase, they go first. lowerCamelCase_ =[obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCamelCase_ =[obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. lowerCamelCase_ =[obj for obj in objects if not key(_A )[0].isupper()] lowerCamelCase_ =ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def __UpperCamelCase ( _A : List[str] ) ->List[str]: """simple docstring""" # This inner function sort imports between [ ]. def _replace(_A : Optional[Any] ): lowerCamelCase_ =match.groups()[0] if "," not in imports: return f'[{imports}]' lowerCamelCase_ =[part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase_ =keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" lowerCamelCase_ =import_statement.split("""\n""" ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. 
lowerCamelCase_ =2 if lines[1].strip() == """[""" else 1 lowerCamelCase_ =[(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCamelCase_ =sort_objects(_A , key=lambda _A : x[1] ) lowerCamelCase_ =[lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCamelCase_ =_re_bracket_content.sub(_replace , lines[1] ) else: lowerCamelCase_ =[part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase_ =keys[:-1] lowerCamelCase_ =get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line lowerCamelCase_ =_re_bracket_content.sub(_replace , _A ) return import_statement def __UpperCamelCase ( _A : List[Any] , _A : Optional[Any]=True ) ->str: """simple docstring""" with open(_A , """r""" ) as f: lowerCamelCase_ =f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCamelCase_ =split_code_in_indented_blocks( _A , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCamelCase_ =main_blocks[block_idx] lowerCamelCase_ =block.split("""\n""" ) # Get to the start of the imports. 
lowerCamelCase_ =0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCamelCase_ =len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. lowerCamelCase_ ="""\n""".join(block_lines[line_idx:-1] ) lowerCamelCase_ =get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCamelCase_ =split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCamelCase_ =_re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCamelCase_ =[(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCamelCase_ =[(i, key) for i, key in enumerate(_A ) if key is not None] lowerCamelCase_ =[x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCamelCase_ =0 lowerCamelCase_ =[] for i in range(len(_A ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowerCamelCase_ =sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. lowerCamelCase_ ="""\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' 
) with open(_A , """w""" ) as f: f.write("""\n""".join(_A ) ) def __UpperCamelCase ( _A : str=True ) ->List[Any]: """simple docstring""" lowerCamelCase_ =[] for root, _, files in os.walk(_A ): if "__init__.py" in files: lowerCamelCase_ =sort_imports(os.path.join(_A , """__init__.py""" ) , check_only=_A ) if result: lowerCamelCase_ =[os.path.join(_A , """__init__.py""" )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __A : Optional[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
75
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : str = {'vocab_file': 'sentencepiece.model'} __A : Optional[Any] = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } __A : int = { 'google/rembert': 2_56, } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:List[Any] = VOCAB_FILES_NAMES _UpperCamelCase:Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase:Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , )-> str: super().__init__( do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) lowerCamelCase_ =do_lower_case lowerCamelCase_ =remove_space lowerCamelCase_ =keep_accents lowerCamelCase_ =vocab_file lowerCamelCase_ =spm.SentencePieceProcessor() self.sp_model.Load(_SCREAMING_SNAKE_CASE ) @property def _snake_case ( self )-> Dict: return len(self.sp_model ) def _snake_case ( self )-> Optional[int]: lowerCamelCase_ ={self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self )-> Optional[Any]: 
lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =None return state def __setstate__( self , _SCREAMING_SNAKE_CASE )-> Optional[Any]: lowerCamelCase_ =d lowerCamelCase_ =spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Union[str, Any]: lowerCamelCase_ =self.sp_model.EncodeAsPieces(_SCREAMING_SNAKE_CASE ) return pieces def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]: return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]: return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ =self.sp_model.decode_pieces(_SCREAMING_SNAKE_CASE ) return out_string def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]: lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )-> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]: lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + 
sep ) * [1] def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""Vocabulary path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return lowerCamelCase_ =os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
75
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available __A : Tuple = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        """`gelu` matches the python reference implementation, not `gelu_new`."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        """`gelu_10` behaves like `gelu` but clips activations at 10.0."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        # Below the clip threshold both activations must agree exactly.
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        """All registered names resolve; unknown names raise KeyError."""
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        # get_activation looks names up in a mapping, so unknown keys raise KeyError.
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        """Each get_activation call returns a fresh object (no shared state)."""
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
75
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:List[Any] = "ClapFeatureExtractor" _UpperCamelCase:List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Any: super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )-> str: lowerCamelCase_ =kwargs.pop("""sampling_rate""" , _SCREAMING_SNAKE_CASE ) if text is None and audios is None: raise ValueError("""You have to specify either text or audios. Both cannot be none.""" ) if text is not None: lowerCamelCase_ =self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if audios is not None: lowerCamelCase_ =self.feature_extractor( _SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is not None and audios is not None: lowerCamelCase_ =audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Tuple: return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> List[str]: return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def _snake_case ( self )-> Dict: lowerCamelCase_ =self.tokenizer.model_input_names lowerCamelCase_ =self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
75
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Prompt for a config, dispatching to the SageMaker or cluster flow."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` parser, stand-alone or as a subcommand."""
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the interactive prompts and write the resulting config to disk."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # No explicit path: make sure the cache directory exists and use the
        # default YAML location inside it.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
75
1
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    ",
)
class FillMaskPipeline(Pipeline):
    """Masked-language-modeling pipeline: fills `[MASK]` slots in the input."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Return the positions of the mask token in `input_ids`."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        """Raise a PipelineException when the sequence contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate every sequence in `model_inputs` (list or batched dict)."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize the raw text and check that a mask token is present."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        # Keep input_ids around so postprocess can rebuild the filled sequence.
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into per-mask lists of {score, token, token_str, sequence}."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve target words to vocabulary ids (tokenizing out-of-vocab ones)."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Route `top_k` / `targets` call kwargs to the postprocess step."""
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token(s) in `inputs`; unwraps single-item batches."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
75
def __UpperCamelCase ( _A : str , _A : int ) ->str: """simple docstring""" lowerCamelCase_ =[[] for _ in range(_A )] lowerCamelCase_ =key - 1 if key <= 0: raise ValueError("""Height of grid can't be 0 or negative""" ) if key == 1 or len(_A ) <= key: return input_string for position, character in enumerate(_A ): lowerCamelCase_ =position % (lowest * 2) # puts it in bounds lowerCamelCase_ =min(_A , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(_A ) lowerCamelCase_ =["""""".join(_A ) for row in temp_grid] lowerCamelCase_ ="""""".join(_A ) return output_string def __UpperCamelCase ( _A : str , _A : int ) ->str: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =key - 1 if key <= 0: raise ValueError("""Height of grid can't be 0 or negative""" ) if key == 1: return input_string lowerCamelCase_ =[[] for _ in range(_A )] # generates template for position in range(len(_A ) ): lowerCamelCase_ =position % (lowest * 2) # puts it in bounds lowerCamelCase_ =min(_A , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("""*""" ) lowerCamelCase_ =0 for row in temp_grid: # fills in the characters lowerCamelCase_ =input_string[counter : counter + len(_A )] grid.append(list(_A ) ) counter += len(_A ) lowerCamelCase_ ="""""" # reads as zigzag for position in range(len(_A ) ): lowerCamelCase_ =position % (lowest * 2) # puts it in bounds lowerCamelCase_ =min(_A , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def __UpperCamelCase ( _A : str ) ->dict[int, str]: """simple docstring""" lowerCamelCase_ ={} for key_guess in range(1 , len(_A ) ): # tries every key lowerCamelCase_ =decrypt(_A , _A ) return results if __name__ == "__main__": import doctest doctest.testmod()
75
1
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS: a BERT backbone plus table-QA fine-tuning heads."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
75
from typing import Any class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE )-> Optional[int]: lowerCamelCase_ =data lowerCamelCase_ =None class _SCREAMING_SNAKE_CASE : def __init__( self )-> Any: lowerCamelCase_ =None def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =self.head while temp is not None: print(temp.data , end=""" """ ) lowerCamelCase_ =temp.next print() def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =Node(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.head lowerCamelCase_ =new_node def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: if node_data_a == node_data_a: return else: lowerCamelCase_ =self.head while node_a is not None and node_a.data != node_data_a: lowerCamelCase_ =node_a.next lowerCamelCase_ =self.head while node_a is not None and node_a.data != node_data_a: lowerCamelCase_ =node_a.next if node_a is None or node_a is None: return lowerCamelCase_ , lowerCamelCase_ =node_a.data, node_a.data if __name__ == "__main__": __A : Optional[int] = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('After swapping') ll.print_list()
75
1
from __future__ import annotations def __UpperCamelCase ( _A : str ) ->list[int]: """simple docstring""" return [ord(_A ) - 96 for elem in plain] def __UpperCamelCase ( _A : list[int] ) ->str: """simple docstring""" return "".join(chr(elem + 96 ) for elem in encoded ) def __UpperCamelCase ( ) ->None: """simple docstring""" lowerCamelCase_ =encode(input("""-> """ ).strip().lower() ) print("""Encoded: """ , _A ) print("""Decoded:""" , decode(_A ) ) if __name__ == "__main__": main()
75
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for YOLOS: a ViT backbone with DETR-style detection heads."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # ViT backbone hyperparameters
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export settings for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
75
1
"""Extended tests for the Seq2Seq Trainer via the ``run_translation.py`` example.

Restored from an obfuscated dump: every method was named ``_snake_case`` (so
later definitions silently erased earlier ones while callers invoke
``self.run_seqaseq_quick`` / ``self.run_trainer``), ``run_trainer`` declared
all its parameters under one duplicate name (a SyntaxError), the base class
and module constants were bound to mangled names, and the ``--max_*_length``
CLI values were garbled to the literal ``69,874`` instead of ``{max_len}``.
"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed


# Make the example script importable for the non-distributed (in-process) path.
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa

set_seed(42)

MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        """Run one tiny epoch of run_translation.py and sanity-check the eval logs."""
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seqaseq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seqaseq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seqaseq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_run_seq2seq_log_level(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a child process so the optimizer state is fresh
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        """Assemble run_translation.py CLI args and run it, either in-process
        (via a patched ``sys.argv``) or as a ``torch.distributed.run`` subprocess.
        Returns the auto-removed output directory."""
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
75
"""Keep the model-support table in ``docs/source/en/index.md`` in sync with the auto modules.

Restored from an obfuscated dump: module constants/regexes were all bound to
``__A`` while the code reads ``TRANSFORMERS_PATH`` / ``PATH_TO_DOCS`` /
``transformers_module`` / ``_re_*_models``; several loops used the undefined
name ``_A`` instead of their own loop variable (``len(_A) for name in
model_names`` etc.); ``defaultdict(_A)`` lost its ``bool`` factory; and the
``lookup_dict[attr_name] = True`` write had degenerated to a dead local
assignment. All are fixed below.
"""
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return ``(text, start_index, end_index, lines)`` for the block of
    `filename` delimited by `start_prompt` and `end_prompt` (both excluded)."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines at both ends of the extracted block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a table cell `width` characters wide (the ✅/❌ emoji render 2 wide)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the markdown table of slow/fast tokenizer and PT/TF/Flax support per model."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Verify the table in index.md is up to date; rewrite it when `overwrite`, otherwise raise."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
75
1
"""Close or nudge stale GitHub issues on huggingface/transformers.

Restored from an obfuscated dump: the sort key was ``lambda _A: i.created_at``
(referencing the undefined ``i``), ``reverse=`` and ``len()`` received
undefined names, the exemption list was bound to ``__A`` while the loop reads
``LABELS_TO_EXEMPT``, and the entry point was unnamed even though the
``__main__`` guard calls ``main()``.
"""
import os
from datetime import datetime as dt

from github import Github


# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Scan open issues; close week-old bot-only threads, mark others stale."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
75
"""Tests for the PyTorch UperNet model (ConvNext backbone).

Restored from an obfuscated dump: ``setUp`` instantiates ``UperNetModelTester``
but the tester class had been renamed ``_SCREAMING_SNAKE_CASE``; the test
class's mixin bases were the undefined ``lowerCAmelCase__`` (the imported
``ModelTesterMixin``/``PipelineTesterMixin`` were unused); every method
collided on the name ``_snake_case``; the
``configs_no_init.backbone_config = ...`` write had degenerated to a dead
local assignment; boolean config arguments had been replaced with undefined
names (restored to ``use_auxiliary_head=True``,
``auxiliary_concat_input=False``); and a redundant duplicate
``self.num_stages`` assignment is dropped.
"""
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds tiny UperNet configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Here we also overwrite some of the tests of test_modeling_common.py, as
    UperNet does not use input_ids/attention_mask and has no base model."""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # intentionally a no-op: UperNetConfig has no common properties to check
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
75
1
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0 )-> List[str]: lowerCamelCase_ =1.0 if scale is None else scale lowerCamelCase_ =0.0 if loc is None else loc super().__init__(_SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_SCREAMING_SNAKE_CASE )] ) @property def _snake_case ( self )-> Union[str, Any]: return self.base_dist.mean * self.scale + self.loc @property def _snake_case ( self )-> int: return self.base_dist.variance * self.scale**2 @property def _snake_case ( self )-> Dict: return self.variance.sqrt() class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =args_dim lowerCamelCase_ =nn.ModuleList([nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) lowerCamelCase_ =domain_map def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Tuple[torch.Tensor]: lowerCamelCase_ =[proj(_SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*_SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE )-> List[str]: super().__init__() lowerCamelCase_ =function def _snake_case ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )-> Any: return self.function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE : _UpperCamelCase:type _UpperCamelCase:int _UpperCamelCase:Dict[str, int] def __init__( self , _SCREAMING_SNAKE_CASE = 1 )-> None: lowerCamelCase_ =dim lowerCamelCase_ ={k: dim * 
self.args_dim[k] for k in self.args_dim} def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str: if self.dim == 1: return self.distribution_class(*_SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*_SCREAMING_SNAKE_CASE ) , 1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )-> Distribution: lowerCamelCase_ =self._base_distribution(_SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(_SCREAMING_SNAKE_CASE , loc=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def _snake_case ( self )-> Tuple: return () if self.dim == 1 else (self.dim,) @property def _snake_case ( self )-> int: return len(self.event_shape ) @property def _snake_case ( self )-> float: return 0.0 def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> nn.Module: return ParameterProjection( in_features=_SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case ( self , *_SCREAMING_SNAKE_CASE )-> Tuple: raise NotImplementedError() @staticmethod def _snake_case ( _SCREAMING_SNAKE_CASE )-> torch.Tensor: return (x + torch.sqrt(torch.square(_SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} _UpperCamelCase:type = StudentT @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]: lowerCamelCase_ =cls.squareplus(_SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) lowerCamelCase_ =2.0 + cls.squareplus(_SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Dict[str, int] = {"loc": 1, "scale": 1} _UpperCamelCase:type = Normal @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Any: 
lowerCamelCase_ =cls.squareplus(_SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Dict[str, int] = {"total_count": 1, "logits": 1} _UpperCamelCase:type = NegativeBinomial @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Any: lowerCamelCase_ =cls.squareplus(_SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Distribution: lowerCamelCase_ , lowerCamelCase_ =distr_args if self.dim == 1: return self.distribution_class(total_count=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE ) , 1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None )-> Distribution: lowerCamelCase_ , lowerCamelCase_ =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
75
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A : Any = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[str, Any] = ["pixel_values"] def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **_SCREAMING_SNAKE_CASE , )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =size if size is not None else {"""shortest_edge""": 224} lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =resample lowerCamelCase_ =do_center_crop lowerCamelCase_ =crop_size lowerCamelCase_ =do_rescale lowerCamelCase_ =rescale_factor lowerCamelCase_ =do_normalize lowerCamelCase_ =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ =image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , 
_SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ =int((256 / 224) * size["""shortest_edge"""] ) lowerCamelCase_ =get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' ) return resize( _SCREAMING_SNAKE_CASE , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , )-> BatchFeature: lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize lowerCamelCase_ =resample if resample is not None else self.resample lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean lowerCamelCase_ =image_std if image_std is not None else self.image_std lowerCamelCase_ =size if size is not None else self.size lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , 
default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) lowerCamelCase_ =make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase_ =[to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: lowerCamelCase_ =[self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: lowerCamelCase_ =[self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowerCamelCase_ =[self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: lowerCamelCase_ =[self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCamelCase_ =[to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCamelCase_ ={"""pixel_values""": images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
75
1
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[List[PIL.Image.Image], np.ndarray] _UpperCamelCase:Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from 
.safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('>=', '0.0.12') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline 
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:np.ndarray _UpperCamelCase:List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
75
# Imports import numpy as np class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any: self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: if red is not None: lowerCamelCase_ =red if green is not None: lowerCamelCase_ =green if blue is not None: lowerCamelCase_ =blue if red_edge is not None: lowerCamelCase_ =red_edge if nir is not None: lowerCamelCase_ =nir return True def _snake_case ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={ """ARVI2""": self.arvaa, """CCCI""": self.ccci, """CVI""": self.cvi, """GLI""": self.gli, """NDVI""": self.ndvi, """BNDVI""": self.bndvi, """redEdgeNDVI""": self.red_edge_ndvi, """GNDVI""": self.gndvi, """GBNDVI""": self.gbndvi, """GRNDVI""": self.grndvi, """RBNDVI""": self.rbndvi, """PNDVI""": self.pndvi, """ATSAVI""": self.atsavi, """BWDRVI""": self.bwdrvi, """CIgreen""": self.ci_green, """CIrededge""": self.ci_rededge, """CI""": self.ci, """CTVI""": self.ctvi, """GDVI""": self.gdvi, """EVI""": self.evi, """GEMI""": self.gemi, """GOSAVI""": self.gosavi, """GSAVI""": self.gsavi, """Hue""": self.hue, """IVI""": self.ivi, """IPVI""": self.ipvi, """I""": self.i, """RVI""": self.rvi, """MRVI""": self.mrvi, """MSAVI""": self.m_savi, """NormG""": self.norm_g, 
"""NormNIR""": self.norm_nir, """NormR""": self.norm_r, """NGRDI""": self.ngrdi, """RI""": self.ri, """S""": self.s, """IF""": self._if, """DVI""": self.dvi, """TVI""": self.tvi, """NDRE""": self.ndre, } try: return funcs[index]() except KeyError: print("""Index not in the list!""" ) return False def _snake_case ( self )-> Optional[Any]: return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case ( self )-> Tuple: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case ( self )-> str: return self.nir * (self.red / (self.green**2)) def _snake_case ( self )-> Optional[int]: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case ( self )-> Tuple: return (self.nir - self.red) / (self.nir + self.red) def _snake_case ( self )-> Dict: return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case ( self )-> List[Any]: return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case ( self )-> Tuple: return (self.nir - self.green) / (self.nir + self.green) def _snake_case ( self )-> Optional[int]: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case ( self )-> List[str]: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case ( self )-> List[str]: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case ( self )-> Optional[int]: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.0_8 , _SCREAMING_SNAKE_CASE=1.2_2 , _SCREAMING_SNAKE_CASE=0.0_3 )-> Any: return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case ( self )-> Tuple: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case 
( self )-> Any: return (self.nir / self.green) - 1 def _snake_case ( self )-> Union[str, Any]: return (self.nir / self.redEdge) - 1 def _snake_case ( self )-> Union[str, Any]: return (self.red - self.blue) / self.red def _snake_case ( self )-> Dict: lowerCamelCase_ =self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case ( self )-> int: return self.nir - self.green def _snake_case ( self )-> Dict: return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case ( self )-> List[str]: lowerCamelCase_ =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.1_6 )-> List[Any]: return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.5 )-> Dict: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case ( self )-> int: return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: return (self.nir - b) / (a * self.red) def _snake_case ( self )-> int: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case ( self )-> Optional[Any]: return (self.red + self.green + self.blue) / 3_0.5 def _snake_case ( self )-> List[str]: return self.nir / self.red def _snake_case ( self )-> List[str]: return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case ( self )-> str: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case ( self )-> List[Any]: return self.green / (self.nir + self.red + self.green) def _snake_case ( self )-> Dict: return self.nir / (self.nir + self.red + self.green) def _snake_case ( self )-> List[str]: return self.red / 
(self.nir + self.red + self.green) def _snake_case ( self )-> int: return (self.green - self.red) / (self.green + self.red) def _snake_case ( self )-> str: return (self.red - self.green) / (self.red + self.green) def _snake_case ( self )-> str: lowerCamelCase_ =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCamelCase_ =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case ( self )-> List[str]: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case ( self )-> List[Any]: return self.nir / self.red def _snake_case ( self )-> Optional[int]: return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case ( self )-> str: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
1
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Union[str, Any]: lowerCamelCase_ =dataset lowerCamelCase_ =process lowerCamelCase_ =params def __len__( self )-> str: return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE )-> Dict: lowerCamelCase_ =self.dataset[i] lowerCamelCase_ =self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )-> List[str]: lowerCamelCase_ =loader lowerCamelCase_ =infer lowerCamelCase_ =params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase_ =None lowerCamelCase_ =loader_batch_size # Internal bookkeeping lowerCamelCase_ =None lowerCamelCase_ =None def __len__( self )-> List[Any]: return len(self.loader ) def __iter__( self )-> Tuple: lowerCamelCase_ =iter(self.loader ) return self def _snake_case ( self )-> int: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase_ =self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase_ ={} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first lowerCamelCase_ =element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase_ =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase_ =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and 
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase_ =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase_ =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase_ =None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase_ =element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase_ =np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase_ =element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase_ =self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def _snake_case ( self )-> Union[str, Any]: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase_ =next(self.iterator ) lowerCamelCase_ =self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): lowerCamelCase_ =processed else: lowerCamelCase_ =list(processed.keys() )[0] lowerCamelCase_ =processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase_ =observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase_ =processed lowerCamelCase_ =0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )-> List[Any]: super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self )-> int: lowerCamelCase_ =iter(self.loader ) lowerCamelCase_ =None return self def _snake_case ( self )-> Optional[Any]: if self.subiterator is None: lowerCamelCase_ =self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase_ =next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase_ =self.infer(next(self.iterator ) , **self.params ) lowerCamelCase_ =next(self.subiterator ) return processed class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __iter__( self )-> Any: lowerCamelCase_ =iter(self.loader ) return self def _snake_case ( self )-> List[str]: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase_ =False lowerCamelCase_ =[] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase_ =self.loader_batch_item() lowerCamelCase_ =item.pop("""is_last""" ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: lowerCamelCase_ =self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): lowerCamelCase_ =processed else: lowerCamelCase_ =list(processed.keys() )[0] lowerCamelCase_ =processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCamelCase_ =observed_batch_size lowerCamelCase_ =processed lowerCamelCase_ =0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase_ =self.loader_batch_item() lowerCamelCase_ =item.pop("""is_last""" ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: lowerCamelCase_ =processed lowerCamelCase_ =item.pop("""is_last""" ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ =dataset lowerCamelCase_ =key def __len__( self )-> Optional[int]: return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE )-> Any: return self.dataset[i][self.key] class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Any: lowerCamelCase_ =dataset lowerCamelCase_ =keya lowerCamelCase_ =keya def __len__( self )-> List[Any]: return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
75
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public symbols, consumed lazily by _LazyModule.
# FIX: the original bound this dict to `__A` yet passed the undefined name
# `_import_structure` to _LazyModule below (NameError at import time), and the
# torch-only symbol list was assigned to `__A` instead of being inserted here.
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the modeling symbols as well.
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy (standard transformers
    # pattern; the original assigned the proxy to a throwaway variable).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
1
from graphs.minimum_spanning_tree_kruskal import kruskal


def __UpperCamelCase() -> None:
    """Regression test: Kruskal's MST on a fixed 9-node weighted graph.

    FIX: the original annotated the return type as ``Optional[Any]`` without
    importing either name from ``typing``; annotations are evaluated at
    definition time, so the module crashed with NameError on load. The
    function returns nothing, so ``None`` is the correct annotation.
    """
    num_nodes = 9
    # Each entry is [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Edge ordering is implementation-defined, so compare as sorted lists.
    assert sorted(result) == sorted(expected)
75
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='%(message)s') def __UpperCamelCase ( _A : np.ndarray ) ->np.ndarray: """simple docstring""" return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.mean(1 ) # Centralize the data of class i lowerCamelCase_ =data - column_reshape(_A ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_A , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCamelCase_ =np.dot(_A , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =features.mean(1 ) lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.shape[1] lowerCamelCase_ =data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCamelCase_ =device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" # Check if the features have been loaded if features.any(): lowerCamelCase_ =features.mean(1 ) # Center the dataset lowerCamelCase_ =features - np.reshape(_A , (data_mean.size, 1) ) lowerCamelCase_ =np.dot(_A , centered_data.T ) / features.shape[1] lowerCamelCase_ , lowerCamelCase_ =np.linalg.eigh(_A ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCamelCase_ =eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCamelCase_ =np.dot(filtered_eigenvectors.T , _A ) logging.info("""Principal Component Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int , _A : int ) ->np.ndarray: """simple docstring""" assert classes > dimensions # Check if features have been already loaded if features.any: lowerCamelCase_ , lowerCamelCase_ =eigh( covariance_between_classes(_A , _A , _A ) , covariance_within_classes(_A , _A , _A ) , ) lowerCamelCase_ =eigenvectors[:, ::-1][:, :dimensions] lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =np.linalg.svd(_A ) lowerCamelCase_ =svd_matrix[:, 0:dimensions] lowerCamelCase_ =np.dot(filtered_svd_matrix.T , _A ) logging.info("""Linear Discriminant Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" # Create dummy dataset with 2 classes and 3 features lowerCamelCase_ =np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 
5, 6, 7]] ) lowerCamelCase_ =np.array([0, 0, 0, 1, 1] ) lowerCamelCase_ =2 lowerCamelCase_ =2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_A ) as error_info: lowerCamelCase_ =linear_discriminant_analysis( _A , _A , _A , _A ) if isinstance(_A , np.ndarray ): raise AssertionError( """Did not raise AssertionError for dimensions > classes""" ) assert error_info.type is AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" lowerCamelCase_ =np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCamelCase_ =2 lowerCamelCase_ =np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(_A ) as error_info: lowerCamelCase_ =principal_component_analysis(_A , _A ) if not np.allclose(_A , _A ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
75
1
from __future__ import annotations from math import pow, sqrt def __UpperCamelCase ( _A : float , _A : float , _A : float ) ->dict[str, float]: """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(_A , 2 ) - pow(_A , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(_A , 2 ) - pow(_A , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(_A , 2 ) + pow(_A , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
75
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bsa import BeautifulSoup  # NOTE(review): almost certainly meant to be `bs4` — confirm
from fake_useragent import UserAgent

# Open the top Google result for a query given on the command line (or prompted
# interactively).
# FIX: the obfuscated original assigned every intermediate to `__A` while later
# expressions read `query`, `url`, `res` and `link`, so it crashed with
# NameError before issuing a request; those names are restored from the use
# sites below.
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f"https://www.google.com/search?q={query}&num=100"
    # A randomised User-Agent reduces the chance of Google serving a captcha page.
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        # Standard result layout: the first organic hit lives in a div.yuRUbf.
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        # Fallback layout (div.kCrYT): the anchor is a redirect whose real
        # destination sits in the `url` query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
75
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __A : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __A : Union[str, Any] = 12_80_22 __A : List[str] = 12_80_28 @require_sentencepiece class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Dict = MaMaaaTokenizer _UpperCamelCase:List[str] = False _UpperCamelCase:int = False _UpperCamelCase:List[Any] = True def _snake_case ( self )-> Optional[Any]: super().setUp() lowerCamelCase_ =["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase_ =dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["""spm_file"""] ) lowerCamelCase_ =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> Any: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str: return ( "This is a test", "This is a test", ) def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ ="""</s>""" lowerCamelCase_ =0 
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> int: lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<s>""" ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("""Skip this test while all models are still to be uploaded.""" ) def _snake_case ( self )-> List[str]: pass def _snake_case ( self )-> Any: lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) lowerCamelCase_ =tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , """This is a test""" ) @slow def _snake_case ( self )-> Optional[Any]: # fmt: off lowerCamelCase_ ={"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 
1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , ) @require_torch @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( unittest.TestCase): _UpperCamelCase:Union[str, Any] = "facebook/m2m100_418M" _UpperCamelCase:Optional[Any] = [ "In my opinion, there are two levels of response from the French government.", "NSA Affair Emphasizes Complete Lack of Debate on Intelligence", ] _UpperCamelCase:Optional[int] = [ "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "L'affaire NSA souligne l'absence totale de débat sur le renseignement", ] # fmt: off _UpperCamelCase:Union[str, Any] = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def _snake_case ( cls )-> Union[str, Any]: lowerCamelCase_ =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" ) lowerCamelCase_ =1 return cls def _snake_case ( self )-> Tuple: self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 12_8006 ) self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 12_8063 ) def _snake_case ( self )-> Optional[int]: lowerCamelCase_ =self.tokenizer.get_vocab() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["""<unk>"""] , 3 ) self.assertIn(self.tokenizer.get_lang_token("""en""" ) , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ ="""en""" lowerCamelCase_ =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: self.assertIn(_SCREAMING_SNAKE_CASE , 
self.tokenizer.all_special_ids ) # fmt: off lowerCamelCase_ =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on lowerCamelCase_ =self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> int: lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE ) @require_torch def _snake_case ( self )-> Tuple: lowerCamelCase_ ="""en""" lowerCamelCase_ ="""fr""" lowerCamelCase_ =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) lowerCamelCase_ =shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowerCamelCase_ =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _snake_case ( self )-> Dict: lowerCamelCase_ ="""mr""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowerCamelCase_ ="""zh""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] ) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _snake_case ( self )-> List[Any]: lowerCamelCase_ ="""mr""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowerCamelCase_ ="""zh""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _snake_case ( self )-> Any: lowerCamelCase_ =self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE ) , { # en_XX, A, test, EOS """input_ids""": [[12_8022, 58, 4183, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 12_8006, } , )
75
from ..utils import DummyObject, requires_backends


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Import-time placeholder used when `torch` and `torchsde` are absent.

    Instantiating it, or calling either classmethod constructor, raises via
    ``requires_backends`` with an instructive install message.

    FIX: the original used the undefined name ``lowerCAmelCase__`` as the
    metaclass while the imported ``DummyObject`` went unused, declared
    ``*args``/``**kwargs`` with the same duplicated name (a SyntaxError), and
    annotated with un-imported ``typing`` names (NameError). The obfuscated
    class/attribute names themselves are kept to avoid inventing identifiers.
    """

    # Backends this placeholder stands in for; presumably read by DummyObject
    # as `_backends` upstream — attribute name kept as-is, TODO confirm.
    _UpperCamelCase = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        # Constructor-style classmethod, gated on the missing backends.
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        # Second constructor-style classmethod; shadows the previous one
        # because the obfuscation collapsed both method names to `_snake_case`.
        requires_backends(cls, ["torch", "torchsde"])
75
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public symbols, consumed lazily by _LazyModule.
# FIX: the original bound this dict to `__A` yet passed the undefined name
# `_import_structure` to _LazyModule below (NameError at import time), and the
# conditional symbol lists were assigned to `__A` instead of inserted here.
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_funnel'] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_funnel'] = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy (standard transformers
    # pattern; the original assigned the proxy to a throwaway variable).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# FIX: the obfuscated original assigned these globals to `__A` while later code
# referenced `covid_data`, `covid_stats` and `fmt`, crashing with NameError
# before any request was made; names restored from those use sites.
covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape worldometers' front-page counters (cases, deaths, recovered)."""
    # The three big counters share the "maincounter-number" div class.
    xpath_str = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
75
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# NOTE(review): the obfuscation collapsed two distinct module globals (upstream
# a logger and the pretrained-config archive map) onto the single name `__A`,
# so the dict shadows the logger; kept as-is to avoid inventing names, minus
# the un-imported `Any`/`Dict` annotations that raised NameError.
__A = logging.get_logger(__name__)
__A = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for YOLOS models: ViT-style backbone hyper-parameters plus
    DETR-style detection (Hungarian matcher / loss) coefficients.

    FIX: the base class is restored to the imported (and otherwise unused)
    ``PretrainedConfig``; the original inherited the undefined name
    ``lowerCAmelCase__``. The ``__init__`` parameter names are recovered from
    the right-hand sides of the attribute assignments — the obfuscated version
    declared every parameter as ``_SCREAMING_SNAKE_CASE``, a SyntaxError.
    """

    # Presumably PretrainedConfig's `model_type` — obfuscated attribute name
    # kept; only the un-importable annotation was dropped. TODO confirm.
    _UpperCamelCase = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # noqa: B006 — mutable default preserved from the original
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # ViT-style encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Input geometry / patch embedding.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Detection head.
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher costs.
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients.
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class _SCREAMING_SNAKE_CASE(OnnxConfig):
    """ONNX export configuration for YOLOS.

    NOTE(review): this shadows the config class above because the obfuscation
    gave both classes the same name (upstream: YolosConfig / YolosOnnxConfig);
    likewise all three properties collapsed to `_snake_case`, so only the last
    is visible at runtime. Kept as-is, with the base restored to the imported
    (otherwise unused) ``OnnxConfig``.
    """

    # Presumably `torch_onnx_minimum_version` — obfuscated name kept.
    _UpperCamelCase = version.parse("1.11")

    @property
    def _snake_case(self) -> Mapping[str, Mapping[int, str]]:
        # ONNX input spec: NCHW pixel values with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def _snake_case(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def _snake_case(self) -> int:
        # Default ONNX opset.
        return 12
75
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public symbols, consumed lazily by _LazyModule.
# FIX: the original bound this dict to `__A` yet passed the undefined name
# `_import_structure` to _LazyModule below (NameError at import time), and the
# conditional symbol lists were assigned to `__A` instead of inserted here.
_import_structure = {
    'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy (standard transformers
    # pattern; the original assigned the proxy to a throwaway variable).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
    # End-to-end smoke test: fine-tune a tiny BERT2BERT encoder-decoder on a
    # 1% slice of CNN/DailyMail using a Seq2Seq trainer.
    # NOTE(review): every `lowerCamelCase_ =` target below was clobbered by the
    # obfuscation while the right-hand sides still read the original local
    # names (bertabert, tokenizer, train_dataset, batch, inputs, ...), so this
    # block cannot run as written; code left byte-identical, comments only.
    # The base class `lowerCAmelCase__` is undefined here — presumably the
    # imported, otherwise-unused TestCasePlus; confirm before fixing.
    @slow
    @require_torch
    def _snake_case ( self )-> str:
        # Model + tokenizer setup (targets garbled — see class note).
        lowerCamelCase_ =EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
        lowerCamelCase_ =BertTokenizer.from_pretrained("""bert-base-uncased""" )
        lowerCamelCase_ =bertabert.config.encoder.vocab_size
        lowerCamelCase_ =tokenizer.sep_token_id
        lowerCamelCase_ =tokenizer.cls_token_id
        lowerCamelCase_ =128
        # Tiny train/eval slices keep the test fast.
        lowerCamelCase_ =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
        lowerCamelCase_ =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
        lowerCamelCase_ =train_dataset.select(range(32 ) )
        lowerCamelCase_ =val_dataset.select(range(16 ) )
        lowerCamelCase_ =4

        def _map_to_encoder_decoder_inputs(_SCREAMING_SNAKE_CASE ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            lowerCamelCase_ =tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=512 )
            lowerCamelCase_ =tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=128 )
            lowerCamelCase_ =inputs.input_ids
            lowerCamelCase_ =inputs.attention_mask
            lowerCamelCase_ =outputs.input_ids
            lowerCamelCase_ =outputs.input_ids.copy()
            # Padding positions are masked out of the loss with -100.
            lowerCamelCase_ =[
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            lowerCamelCase_ =outputs.attention_mask
            assert all(len(_SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
            assert all(len(_SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
            return batch

        def _compute_metrics(_SCREAMING_SNAKE_CASE ):
            # Exact-match accuracy over decoded prediction/label strings.
            lowerCamelCase_ =pred.label_ids
            lowerCamelCase_ =pred.predictions
            # all unnecessary tokens are removed
            lowerCamelCase_ =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =sum([int(pred_str[i] == label_str[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) / len(_SCREAMING_SNAKE_CASE )
            return {"accuracy": accuracy}

        # map train dataset
        lowerCamelCase_ =train_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=_SCREAMING_SNAKE_CASE ,
            batch_size=_SCREAMING_SNAKE_CASE ,
            remove_columns=["""article""", """highlights"""] ,
        )
        train_dataset.set_format(
            type="""torch""" ,
            columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,
        )

        # same for validation dataset
        lowerCamelCase_ =val_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=_SCREAMING_SNAKE_CASE ,
            batch_size=_SCREAMING_SNAKE_CASE ,
            remove_columns=["""article""", """highlights"""] ,
        )
        val_dataset.set_format(
            type="""torch""" ,
            columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,
        )

        # Training arguments: evaluate/log every 2 steps for the smoke test.
        lowerCamelCase_ =self.get_auto_remove_tmp_dir()
        lowerCamelCase_ =SeqaSeqTrainingArguments(
            output_dir=_SCREAMING_SNAKE_CASE ,
            per_device_train_batch_size=_SCREAMING_SNAKE_CASE ,
            per_device_eval_batch_size=_SCREAMING_SNAKE_CASE ,
            predict_with_generate=_SCREAMING_SNAKE_CASE ,
            evaluation_strategy="""steps""" ,
            do_train=_SCREAMING_SNAKE_CASE ,
            do_eval=_SCREAMING_SNAKE_CASE ,
            warmup_steps=0 ,
            eval_steps=2 ,
            logging_steps=2 ,
        )
        # instantiate trainer
        lowerCamelCase_ =SeqaSeqTrainer(
            model=_SCREAMING_SNAKE_CASE ,
            args=_SCREAMING_SNAKE_CASE ,
            compute_metrics=_compute_metrics ,
            train_dataset=_SCREAMING_SNAKE_CASE ,
            eval_dataset=_SCREAMING_SNAKE_CASE ,
            tokenizer=_SCREAMING_SNAKE_CASE ,
        )
        # start training
        trainer.train()
75
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels (numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Return accuracy and F1 (with the given sklearn averaging mode) together."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute MultiRC metrics: per-question macro F1, answer-level F1 and exact match.

    `ids_preds` carry an `idx` dict identifying the paragraph/question each
    prediction belongs to, so answers can be regrouped per question.
    """
    question_map = {}
    # Group (prediction, label) pairs by their paragraph-question identifier.
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # Exact match only when every answer for the question is correct.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    """SuperGLUE benchmark metric; dispatches per configuration (subset) name."""

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # record/multirc take nested dicts, which the numpy format cannot hold.
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature schema for the current subset (nested for record/multirc)."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            # Re-shape references/predictions into the structures expected by
            # the official ReCoRD evaluation script.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
75
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration for RWKV models.

    Stores vocabulary/model sizes and generation-related token ids; passed to
    the model class to build the architecture.
    """

    model_type = "rwkv"
    # RWKV has no position embeddings; its context length plays that role.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both sizes default to sensible multiples of the hidden size when unset.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
700
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    """Configuration for DETA (Detection Transformers with Assignment) models.

    Wraps a backbone config plus the deformable-DETR style transformer, matcher
    and loss hyper-parameters.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                # A plain dict is re-hydrated into the matching config class.
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
75
0
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    """Check at call time that `pkg` satisfies its pinned version; `hint` is an
    optional message appended to the error raised on mismatch."""
    require_version(deps[pkg], hint)
701
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration for ALBERT models.

    ALBERT factorizes embeddings (`embedding_size` != `hidden_size`) and shares
    layer parameters across `num_hidden_groups` groups.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    """ONNX export config for ALBERT; declares the dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
75
0
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    """Step `scheduler` `num_steps` times and record the learning rate at each step."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like `unwrap_schedule`, but save/reload the scheduler state at the midpoint
    to verify that checkpointing does not change the schedule."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    """Tests for the AdamW and Adafactor optimizers."""

    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    """Tests that every LR scheduler produces the expected rate curve and survives
    a save/reload round-trip."""

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wrap a schedule function in a class so it becomes picklable (plain lambdas
    inside LambdaLR schedulers are not)."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        # Replace each lambda in place; the mangled version dropped this result.
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
702
from collections import deque from math import floor from random import random from time import time class _SCREAMING_SNAKE_CASE : def __init__( self )-> List[str]: lowerCamelCase_ ={} def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[Any]: if self.graph.get(_SCREAMING_SNAKE_CASE ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: lowerCamelCase_ =[[w, v]] if not self.graph.get(_SCREAMING_SNAKE_CASE ): lowerCamelCase_ =[] def _snake_case ( self )-> str: return list(self.graph ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict: if self.graph.get(_SCREAMING_SNAKE_CASE ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> Optional[Any]: if s == d: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_SCREAMING_SNAKE_CASE ) return visited else: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]: if c == -1: lowerCamelCase_ =floor(random() * 1_0000 ) + 10 for i in range(_SCREAMING_SNAKE_CASE ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): lowerCamelCase_ =floor(random() * c ) + 1 if n != i: 
self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Any: lowerCamelCase_ =deque() lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] d.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) while d: lowerCamelCase_ =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]: lowerCamelCase_ =0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]: return len(self.graph[u] ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Union[str, Any]: lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =[] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return sorted_nodes def _snake_case ( self )-> str: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( 
visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return list(_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Tuple: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have 
reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return False def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> List[str]: lowerCamelCase_ =time() self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]: lowerCamelCase_ =time() self.bfs(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin class _SCREAMING_SNAKE_CASE : def __init__( self )-> Optional[Any]: lowerCamelCase_ ={} def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[str]: # check if the u exists if self.graph.get(_SCREAMING_SNAKE_CASE ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist lowerCamelCase_ =[[w, v]] # add the other way if self.graph.get(_SCREAMING_SNAKE_CASE ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist lowerCamelCase_ =[[w, u]] def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: if self.graph.get(_SCREAMING_SNAKE_CASE ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_SCREAMING_SNAKE_CASE ) # the other way round if self.graph.get(_SCREAMING_SNAKE_CASE ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> int: if s == d: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_SCREAMING_SNAKE_CASE ) return visited else: stack.append(node[1] ) 
visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]: if c == -1: lowerCamelCase_ =floor(random() * 1_0000 ) + 10 for i in range(_SCREAMING_SNAKE_CASE ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): lowerCamelCase_ =floor(random() * c ) + 1 if n != i: self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]: lowerCamelCase_ =deque() lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] d.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) while d: lowerCamelCase_ =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]: return len(self.graph[u] ) def _snake_case ( self )-> Any: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: 
stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return list(_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Any: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return False def _snake_case ( self )-> Optional[Any]: return list(self.graph ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> str: lowerCamelCase_ =time() self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin def 
_snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Dict: lowerCamelCase_ =time() self.bfs(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin
75
0
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __UpperCamelCase ( _A : List[Any] , _A : str , _A : Dict ) ->Any: """simple docstring""" lowerCamelCase_ =AutoConfig.from_pretrained(_A ) lowerCamelCase_ =FlaxAutoModelForSeqaSeqLM.from_config(config=_A ) lowerCamelCase_ =checkpoints.load_tax_checkpoint(_A ) lowerCamelCase_ ="""wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": lowerCamelCase_ ="""SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase_ ="""LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ ="""TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`""" """ attribute with a value from [\'local\', \'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase_ =f'layers_{str(_A )}' # Self-Attention lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: lowerCamelCase_ 
=tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization lowerCamelCase_ =tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning lowerCamelCase_ =flax_model.params["""encoder"""]["""block"""][str(_A )]["""layer"""] lowerCamelCase_ =tax_attention_key lowerCamelCase_ =tax_attention_out lowerCamelCase_ =tax_attention_query lowerCamelCase_ =tax_attention_value lowerCamelCase_ =tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ =tax_global_layer_norm if split_mlp_wi: lowerCamelCase_ =tax_mlp_wi_a lowerCamelCase_ =tax_mlp_wi_a else: lowerCamelCase_ =tax_mlp_wi lowerCamelCase_ =tax_mlp_wo lowerCamelCase_ =tax_mlp_layer_norm lowerCamelCase_ =flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase_ =tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T lowerCamelCase_ =tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ =tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T lowerCamelCase_ =tax_encoder_global_rel_embedding # Assigning lowerCamelCase_ =tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] lowerCamelCase_ =tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase_ =f'layers_{str(_A )}' # Self-Attention lowerCamelCase_ 
=tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] lowerCamelCase_ =tax_enc_dec_attention_module["""key"""]["""kernel"""] lowerCamelCase_ =tax_enc_dec_attention_module["""out"""]["""kernel"""] lowerCamelCase_ =tax_enc_dec_attention_module["""query"""]["""kernel"""] lowerCamelCase_ =tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization lowerCamelCase_ =tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning lowerCamelCase_ =flax_model.params["""decoder"""]["""block"""][str(_A )]["""layer"""] lowerCamelCase_ =tax_attention_key lowerCamelCase_ =tax_attention_out lowerCamelCase_ =tax_attention_query lowerCamelCase_ =tax_attention_value lowerCamelCase_ 
=tax_pre_attention_layer_norm lowerCamelCase_ =tax_enc_dec_attention_key lowerCamelCase_ =tax_enc_dec_attention_out lowerCamelCase_ =tax_enc_dec_attention_query lowerCamelCase_ =tax_enc_dec_attention_value lowerCamelCase_ =tax_cross_layer_norm if split_mlp_wi: lowerCamelCase_ =tax_mlp_wi_a lowerCamelCase_ =tax_mlp_wi_a else: lowerCamelCase_ =tax_mlp_wi lowerCamelCase_ =tax_mlp_wo lowerCamelCase_ =txa_mlp_layer_norm lowerCamelCase_ =flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase_ =tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] lowerCamelCase_ =txa_decoder_norm # Only for layer 0: lowerCamelCase_ =tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T lowerCamelCase_ =tax_decoder_rel_embedding # Token Embeddings lowerCamelCase_ =tax_model["""target"""]["""token_embedder"""]["""embedding"""] lowerCamelCase_ =txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowerCamelCase_ =tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(_A ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) __A : List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
703
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never auto-staled / auto-closed.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    """Run the stale-bot pass over open issues of huggingface/diffusers.

    Policy:
      * close an issue 7 days after the bot's own stale notification, if the
        issue is at least 30 days old and carries no exempt label;
      * un-stale an issue when a human (not the bot) commented last;
      * otherwise post the stale notification after 23 days of inactivity.

    Reconstructed from mangled source: locals were all bound to
    ``lowerCamelCase_`` but read under their real names, the sort key read an
    unbound ``i``, and the ``__main__`` guard called ``main()`` while the
    function itself had been renamed — every call raised ``NameError``.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        # Fetch labels once per issue (get_labels() is a paginated API call)
        # and normalise to lowercase names.  The original tested
        # ``"stale" in issue.get_labels()``, comparing a string against Label
        # objects — that branch could never fire.
        labels = [label.name.lower() for label in issue.get_labels()]

        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label in LABELS_TO_EXEMPT for label in labels)
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in labels
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label in LABELS_TO_EXEMPT for label in labels)
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
75
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __A : int = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) __A : Union[str, Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 
5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) __A : Union[str, Any] = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) __A : Union[str, Any] = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) __A : Dict = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]), ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) __A : int = ( ('''JH AH 
TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) __A : Optional[int] = ( ('''JH AH TH KH QH''', 23), ('''JH 9H TH KH QH''', 22), ('''JC KH JS JD JH''', 21), ('''KH KC 3S 3H 3D''', 20), ('''8C 9C 5C 3C TC''', 19), ('''JS QS 9H TS KH''', 18), ('''7C 7S KH 2H 7H''', 17), ('''3C KH 5D 5S KH''', 16), ('''QH 8H KD JH 8S''', 15), ('''2D 6D 9D TH 7D''', 14), ) def SCREAMING_SNAKE_CASE_ ( ) ->Dict: """simple docstring""" lowerCamelCase_ =randrange(len(_lowercase ) ), randrange(len(_lowercase ) ) lowerCamelCase_ =['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] lowerCamelCase_ =SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def SCREAMING_SNAKE_CASE_ ( _A : Any = 100 ) ->Any: """simple docstring""" return (generate_random_hand() for _ in range(_lowercase )) @pytest.mark.parametrize("""hand, expected""" , _lowercase ) def SCREAMING_SNAKE_CASE_ ( _A : List[str] , _A : List[str] ) ->Union[str, Any]: """simple docstring""" assert PokerHand(_lowercase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , _lowercase ) def SCREAMING_SNAKE_CASE_ ( _A : int , _A : Union[str, Any] ) ->List[str]: """simple docstring""" assert PokerHand(_lowercase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , _lowercase ) def SCREAMING_SNAKE_CASE_ ( _A : int , _A : Optional[int] , _A : Tuple ) ->Optional[Any]: """simple docstring""" lowerCamelCase_ =PokerHand(_lowercase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , _lowercase ) def SCREAMING_SNAKE_CASE_ ( _A : str , _A : Optional[int] ) ->Union[str, Any]: """simple docstring""" assert PokerHand(_lowercase )._is_same_kind() == expected 
@pytest.mark.parametrize("""hand, expected""" , _lowercase ) def SCREAMING_SNAKE_CASE_ ( _A : Optional[Any] , _A : Optional[int] ) ->Union[str, Any]: """simple docstring""" assert PokerHand(_lowercase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , _lowercase ) def SCREAMING_SNAKE_CASE_ ( _A : Optional[Any] , _A : Any , _A : Tuple ) ->Dict: """simple docstring""" assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def SCREAMING_SNAKE_CASE_ ( _A : Tuple , _A : Union[str, Any] , _A : str ) ->str: """simple docstring""" assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected def SCREAMING_SNAKE_CASE_ ( ) ->Dict: """simple docstring""" lowerCamelCase_ =[PokerHand(_lowercase ) for hand in SORTED_HANDS] lowerCamelCase_ =poker_hands.copy() shuffle(_lowercase ) lowerCamelCase_ =chain(sorted(_lowercase ) ) for index, hand in enumerate(_lowercase ): assert hand == poker_hands[index] def SCREAMING_SNAKE_CASE_ ( ) ->Any: """simple docstring""" # Test that five high straights are compared correctly. lowerCamelCase_ =[PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=_lowercase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def SCREAMING_SNAKE_CASE_ ( ) ->List[str]: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
lowerCamelCase_ =PokerHand("""2C 4S AS 3D 5C""" ) lowerCamelCase_ =True lowerCamelCase_ =[5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def SCREAMING_SNAKE_CASE_ ( ) ->Any: """simple docstring""" # Problem number 54 from Project Euler # Testing from poker_hands.txt file lowerCamelCase_ =0 lowerCamelCase_ =os.path.abspath(os.path.dirname(_lowercase ) ) lowerCamelCase_ =os.path.join(_lowercase , """poker_hands.txt""" ) with open(_lowercase ) as file_hand: for line in file_hand: lowerCamelCase_ =line[:14].strip() lowerCamelCase_ =line[15:].strip() lowerCamelCase_ =PokerHand(_lowercase ), PokerHand(_lowercase ) lowerCamelCase_ =player.compare_with(_lowercase ) if output == "Win": answer += 1 assert answer == 376
704
"""Sorts the entries of `_import_structure` blocks in package `__init__.py` files.

The mangled original defined every function as `__UpperCamelCase` while the
call sites used the real names, so nothing was callable; the coherent names
are restored from those call sites.
"""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ("" for an unindented line)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks whose first line sits exactly at `indent_level`.

    If `start_prompt` is given, everything before the first line starting with it
    becomes the first block; if `end_prompt` is given, splitting stops there and
    the remainder becomes the last block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # Current block accumulates lines until we hit the next line at indent_level.
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at indent_level closes the current block if that block had
            # gone deeper (its last line was more indented).
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` with constants first, classes second, functions last."""

    # If no key is provided, we use a noop (fixes the mangled `return x`).
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the names inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one init file.

    Returns True when a change would be needed and `check_only` is set;
    otherwise rewrites the file in place.
    """
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Collect every failing init (the mangled code overwrote the list).
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
75
0
"""Preprocess and filter a code dataset: hash-dedup, heuristics, optional minhash dedup, save.

Restored from an identifier-mangled dump: function names are taken from their
call sites in `preprocess`/`filter`, and `hashlib.mda` is fixed to `hashlib.md5`.
"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Return the md5 hash of the whitespace-stripped content."""
    # `hashlib.mda` in the mangled source does not exist; md5 is the intent.
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Return mean and max line length of the content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Return the fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """True the first time a hash is seen; removes it from `uniques` so duplicates fail."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Scan the first `scan_width` lines for auto-generation markers."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically flag configuration or test files."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit keywords near the top of the file
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: high density of "config"/"test" tokens relative to file size
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Flag files that contain no function/class/loop keywords at all."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Flag files with at most `minimum` '=' characters (likely data, not code)."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Return characters-per-token ratio using the global tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Compute all per-example statistics used by the filter."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Keep an example only if it is unique and passes all heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Gzip `file_path` and remove the uncompressed original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
705
"""SentencePiece tokenizer for RemBERT, restored from an identifier-mangled dump.

Method names are recovered from the `PreTrainedTokenizer` API they override
(`_tokenize`, `build_inputs_with_special_tokens`, ...); the mangled version
defined every method as `_snake_case`, so they shadowed each other.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer backed by a SentencePiece model file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ) -> str:
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the SentencePiece vocabulary.
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of pieces back into a single string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add [CLS]/[SEP] around one sequence or a pair of sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
75
0
"""SegFormer model configuration, restored from an identifier-mangled dump.

Both classes were named `_SCREAMING_SNAKE_CASE` (a collision — the second
definition silently replaced the first); names are restored from
`model_type = "segformer"` and the `OnnxConfig` base.
"""
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    """Configuration for a SegFormer model (encoder depths, attention heads, decoder size, ...)."""

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # `reshape_last_stage=False` is a deprecated escape hatch.
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Defaults to True when not supplied (the deprecated flag above).
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single pixel_values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance when validating exported outputs against PyTorch.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
706
"""Unit tests for transformers activation functions, restored from a mangled dump.

All four test methods were named `_snake_case`, so only the last survived and
none matched unittest's `test_*` discovery pattern; descriptive `test_*` names
are restored. `assertRaises(_SCREAMING_SNAKE_CASE)` (the test class itself!) is
replaced with the exception types `get_activation` / attribute access actually
raise (KeyError for unknown names, AttributeError for a missing attribute).
"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        # gelu_python must match torch's builtin gelu, but differ from gelu_new.
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        # gelu_10 clips gelu at 10.0 and matches it below the clip point.
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        # Every registered activation name must resolve; unknown ones must raise.
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        # Two lookups of the same name must return distinct module instances.
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
75
0
"""Deprecated alias for FlavaImageProcessor.

The mangled dump passed undefined `_lowercase` for both the warning category
and the constructor arguments; the class/base names are recoverable from the
deprecation message and the module's own import.
"""
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard deprecation warning, then defer entirely to the
        # replacement image processor.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
707
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Prompt for the compute environment and delegate to the matching wizard."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` argument parser (stand-alone or as a subcommand)."""
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the interactive wizard and save the resulting config file."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Ensure the default cache location exists before writing there.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    # Save as JSON or YAML depending on the target file's extension.
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
75
0
"""Logical and arithmetic binary shifts on integers, returning "0b..." strings.

The mangled dump defined all three functions under one name, so the first two
were dead code; distinct canonical names are restored.
"""


def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits (append zeros).

    Both inputs must be non-negative; raises ValueError otherwise.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits (drop low bits, no sign fill).

    Both inputs must be non-negative; raises ValueError otherwise.
    Shifting out every bit yields "0b0".
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, replicating the sign bit.

    Negative numbers are represented in two's complement before shifting.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
708
"""Rail-fence (zigzag) transposition cipher.

Restored names: the mangled dump defined all three functions as
`__UpperCamelCase`, yet the brute-force routine called `decrypt(...)` — a
NameError. The internal call site fixes the intended names.
"""


def encrypt(input_string: str, key: int) -> str:
    """Write `input_string` in a zigzag over `key` rails, then read row by row.

    Raises ValueError for non-positive keys; a key of 1 (or >= message length)
    leaves the text unchanged.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert `encrypt`: rebuild the zigzag template, fill it, read in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every key from 1 to len(input_string)-1; return {key: decryption}."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
0
import pickle

import numpy as np


class CNN:
    """A tiny convolution + pooling + two-layer perceptron network trained
    with plain gradient descent, implemented with NumPy matrices only.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: ``[kernel_size, number_of_kernels, conv_step]``
        :param size_p1: pooling window size of the first pooling layer
        :param bp_num1: units in the BP input layer (flattened pooled features)
        :param bp_num2: units in the BP hidden layer
        :param bp_num3: units in the BP output layer
        :param rate_w: learning rate for the weights
        :param rate_t: learning rate for the thresholds (biases)
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]  # [kernel_size, number_of_kernels]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Kernels start uniformly in (-0.5, 0.5); thresholds in (-1, 1).
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Serialize hyper-parameters and learned parameters with pickle."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a :class:`CNN` from a file written by :meth:`save_model`.

        Only load files you trust: unpickling can execute arbitrary code.
        """
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a prediction to 3 decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve ``data`` with every kernel in ``w_convs``.

        :return: ``(focus_list, data_featuremap)`` — the flattened input
            patches and a list of feature-map matrices (one per kernel).
        """
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Down-sample each feature map with average or max pooling."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into a single 1-D array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Flatten a single matrix to shape ``(1, rows * cols)``."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(
        self, out_map, pd_pool, num_map, size_map, size_pooling
    ):
        """Up-sample pooled gradients back to feature-map size and apply the
        sigmoid derivative of each output map."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(
        self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool
    ):
        """Train until ``n_repeat`` epochs or the mean error drops below
        ``error_accuracy``. Returns the final mean squared error."""
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # Forward pass: conv -> pool -> flatten -> two dense layers.
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # matplotlib is only needed for this optional plot; import lazily
            # so training works in environments without it.
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on each test sample.

        :return: array of outputs rounded to 3 decimal places
        """
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return (feature maps, pooled maps) for one input, so the
        intermediate representations can be inspected."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
709
from typing import Any


class Node:
    """A node of a singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

class LinkedList:
    """Minimal singly linked list supporting head insertion and swapping
    the payloads of two nodes identified by value."""

    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        """Print all node values on one line, separated by spaces."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert a new node holding ``new_data`` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the payloads of the first nodes holding the two given values.

        A no-op when the values are equal or either value is absent.
        """
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            # Swap data only; the links stay untouched.
            node_1.data, node_2.data = node_2.data, node_1.data

if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
75
0
import os
import platform
import sys

# NOTE(review): "3" matches TF_CPP_MIN_LOG_LEVEL's "errors only" level, but the
# constant is never used below — presumably intended for os.environ; confirm
# upstream. The original annotated it `List[str]`, which raised NameError at
# import time because `typing.List` was never imported, so the bogus
# annotation is dropped.
__A = "3"

print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())

# Torch details are optional: report None when torch is not installed.
try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
    print('Torch version:', None)

# Same for transformers.
try:
    import transformers

    print('transformers version:', transformers.__version__)
except ImportError:
    print('transformers version:', None)
710
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration class for a YOLOS model.

    Stores the ViT-style backbone hyper-parameters plus the DETR-style
    detection head settings (Hungarian matcher costs and loss coefficients).
    Unrecognized keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # [height, width]; list kept for config (de)serialization
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with all four axes dynamic.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
75
0
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Return a cryptographically random password of ``length`` characters,
    drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of total length ``i`` that is guaranteed to
    contain every character of ``chars_incl``.

    The remaining positions are split roughly in thirds between letters,
    digits and punctuation, then the whole thing is shuffled.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers.
# NOTE: it shadows the stdlib `random` module name inside this module.
def random(chars_incl: str, i: int) -> str:
    """Return ``i`` characters chosen securely from ``chars_incl``."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True when ``password`` is at least ``min_length`` characters
    long and contains an uppercase letter, a lowercase letter, a digit and
    a special character."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    """Interactive entry point: ask for a length and required characters,
    then print both generated passwords."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
711
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo
# with the command: python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the stripped text between ``start_prompt`` and ``end_prompt``
    in ``filename``, plus its start/end line indices and all file lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim surrounding blank lines.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center ``text`` in a cell of ``width`` characters.

    The check-mark emojis render two columns wide, hence the special case.
    """
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the markdown table of model ↔ tokenizer/framework support."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in ``index.md`` is up to date; rewrite it in
    place when ``overwrite`` is True, raise otherwise."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
75
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration class for an X-MOD model.

    Holds the usual RoBERTa-style encoder hyper-parameters plus the
    language-adapter settings specific to X-MOD. Extra keyword arguments are
    forwarded to ``PretrainedConfig``.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD adapter settings
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Stored as a list so the config serializes to JSON cleanly.
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    """ONNX export configuration for X-MOD."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
712
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds tiny configs and random inputs for fast UperNet forward-pass tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        # Small ConvNext backbone matching the tester's sizes.
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        # NOTE(review): the boolean flag values were lost in the source; True/False
        # below follow the upstream transformers test — confirm against it.
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # UperNet upsamples logits back to the input resolution.
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for UperNetForSemanticSegmentation."""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Download the ADE20k fixture image and return it as an RGB PIL image."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Checks logits of released checkpoints against recorded reference values."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
75
0
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2SeqTrainerTester(TestCasePlus):
    """End-to-end smoke test: fine-tune a tiny BERT2BERT model with Seq2SeqTrainer."""

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        # Tiny encoder-decoder so the test stays cheap.
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        # Keep only a handful of examples.
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask padding in the loss with -100.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
713
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Image processor applying resize -> center-crop -> rescale -> normalize
    and returning a ``pixel_values`` batch.

    Resizing uses the shortest-edge convention scaled by 256/224 before the
    224x224 center crop.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image.

        If ``size`` has ``"shortest_edge"``, the shortest edge is resized to
        ``int((256 / 224) * size["shortest_edge"])`` preserving aspect ratio;
        otherwise the image is resized to exactly ``(height, width)``.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch.

        Any argument left as ``None`` falls back to the value stored on the
        processor at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base (framework-independent) import structure; model modules are added below
# only when the corresponding framework is installed.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
714
# Imports
import numpy as np


class IndexCalculation:
    """Computes spectral vegetation indices (NDVI, EVI, ...) from per-band
    reflectance data.

    Bands may be scalars or numpy arrays: red, green, blue, red-edge and NIR.
    Use ``calculation(index_name, ...)`` for name-based dispatch, or call the
    individual index methods directly.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update any subset of the band matrices; ``None`` leaves a band unchanged."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Compute the index named ``index`` (e.g. ``"NDVI"``).

        Optionally updates the band matrices first. Returns ``False`` (after
        printing a message) when the index name is unknown.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-band NDVI."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted Transformed Soil-Adjusted VI."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index (green band)."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index (red-edge band)."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil-Adjusted VI."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil-Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Hue component of the RGB bands."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index with intercept ``b`` and slope ``a``."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity of the RGB bands."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified (normalized) RVI."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green band."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR band."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red band."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation of the RGB bands."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index IF."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        # NOTE(review): classical DVI is nir - red; the original source computed
        # nir / red, which is preserved here — confirm which is intended.
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
0
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


# (tf substring, hf substring) pairs applied in order by rename_state_dict_key.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF variables skipped entirely during conversion (their biases are not used
# on the PyTorch side).
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k: str, patterns) -> str:
    """Translate a TF variable name into the HF state-dict key by applying
    each (tf, hf) substitution in order."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Build a BigBirdPegasus model and load the renamed (and, for dense
    kernels, transposed) TF weights into it.

    Args:
        tf_weights: {tf variable name: numpy array} from the TF checkpoint.
        config_update: overrides forwarded to BigBirdPegasusConfig.

    Raises:
        ValueError: when a renamed key has no match in the torch state dict.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights: they use decoder-specific rename patterns.
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # The shared position embeddings are duplicated for encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    """Load every variable of a TF checkpoint into {name: numpy array},
    skipping bookkeeping variables such as the optimizer's global_step."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    """End-to-end conversion: read the TF checkpoint at ckpt_path, convert,
    and save the resulting HF model to save_dir."""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
715
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps each submodule to the public names it provides; consumed by _LazyModule.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy torch imports only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
0
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbour upsampling followed by a 3x3 same-padding conv (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    """2x downsampling via a strided 3x3 convolution (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    """GroupNorm/SiLU/Conv residual block with timestep-embedding injection.

    Attributes:
        in_channels: channels of the incoming feature map.
        out_channels: output channels; defaults to in_channels when None.
        dropout_prob: dropout rate applied between the two convolutions.
        use_nin_shortcut: force (True/False) a 1x1 projection on the skip path;
            when None it is enabled iff in/out channel counts differ.
        dtype: parameter/computation dtype.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the (already activated) timestep embedding and broadcast it
        # over the spatial H and W axes before adding.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
716
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='%(message)s') def __UpperCamelCase ( _A : np.ndarray ) ->np.ndarray: """simple docstring""" return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.mean(1 ) # Centralize the data of class i lowerCamelCase_ =data - column_reshape(_A ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_A , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCamelCase_ =np.dot(_A , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =features.mean(1 ) lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.shape[1] lowerCamelCase_ =data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCamelCase_ =device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" # Check if the features have been loaded if features.any(): lowerCamelCase_ =features.mean(1 ) # Center the dataset lowerCamelCase_ =features - np.reshape(_A , (data_mean.size, 1) ) lowerCamelCase_ =np.dot(_A , centered_data.T ) / features.shape[1] lowerCamelCase_ , lowerCamelCase_ =np.linalg.eigh(_A ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCamelCase_ =eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCamelCase_ =np.dot(filtered_eigenvectors.T , _A ) logging.info("""Principal Component Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int , _A : int ) ->np.ndarray: """simple docstring""" assert classes > dimensions # Check if features have been already loaded if features.any: lowerCamelCase_ , lowerCamelCase_ =eigh( covariance_between_classes(_A , _A , _A ) , covariance_within_classes(_A , _A , _A ) , ) lowerCamelCase_ =eigenvectors[:, ::-1][:, :dimensions] lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =np.linalg.svd(_A ) lowerCamelCase_ =svd_matrix[:, 0:dimensions] lowerCamelCase_ =np.dot(filtered_svd_matrix.T , _A ) logging.info("""Linear Discriminant Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" # Create dummy dataset with 2 classes and 3 features lowerCamelCase_ =np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 
5, 6, 7]] ) lowerCamelCase_ =np.array([0, 0, 0, 1, 1] ) lowerCamelCase_ =2 lowerCamelCase_ =2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_A ) as error_info: lowerCamelCase_ =linear_discriminant_analysis( _A , _A , _A , _A ) if isinstance(_A , np.ndarray ): raise AssertionError( """Did not raise AssertionError for dimensions > classes""" ) assert error_info.type is AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" lowerCamelCase_ =np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCamelCase_ =2 lowerCamelCase_ =np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(_A ) as error_info: lowerCamelCase_ =principal_component_analysis(_A , _A ) if not np.allclose(_A , _A ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
75
0
"""UniSpeech model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


# NOTE(review): the class identifier was mangled to `_SCREAMING_SNAKE_CASE` by an
# automated transform; it is kept to preserve the external interface. The
# undefined base class `_A` has been restored to the imported PretrainedConfig.
class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for a UniSpeech model.

    Holds the encoder/feature-extractor architecture, SpecAugment masking,
    codevector-quantization pretraining and CTC-loss hyperparameters. Extra
    keyword arguments are forwarded to PretrainedConfig.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the conv feature extractor = product of per-layer strides.
        return functools.reduce(operator.mul, self.conv_stride, 1)
717
"""Open the top Google search result for a query in the default browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: was `from bsa import ...` (nonexistent module)
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Query comes from the CLI args when present, otherwise from a prompt.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback markup (no-JS results page): the href is a /url?q=... redirect,
        # so pull the real target out of the query string.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
75
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP requires both torch and transformers >= 4.25.0; when either is
    # missing, fall back to dummy placeholders that raise a helpful error on use.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies satisfied: expose the real pipeline implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
718
from ..utils import DummyObject, requires_backends


# Placeholder emitted when torch/torchsde are not installed: every access
# raises a clear "missing backend" error via the DummyObject metaclass,
# which reads the `_backends` attribute below.
class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
75
0
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly cut `wav` to at most `max_length` seconds; shorter clips pass through."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # `freeze_feature_extractor` is deprecated in favour of `freeze_feature_encoder`;
        # warn on the legacy flag and forbid mixing the two inconsistently.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    """Fine-tune a pretrained audio model for single-label audio classification."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply random subsampling + feature extraction across a training batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply feature extraction (no subsampling) across a validation batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
719
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# Result record: total cases, total deaths, total recovered.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape worldometers.info and return world COVID-19 totals.

    The three ``maincounter-number`` spans on the page are, in order,
    cases / deaths / recovered.
    """
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
75
0
"""Emulation of the WWII Enigma machine.

Because the signal passes through a reflector, encryption and decryption are
the same operation: running the ciphertext through ``enigma`` with the same
settings recovers the plaintext.
"""
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet -------------------------- (string.ascii_uppercase)
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default rotor selection --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"

# reflector: a fixed symmetric pairing of letters (A<->N, B<->O, ...)
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C",
    "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F",
    "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I",
    "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor positions/selection and build the plugboard mapping.

    Raises:
        Exception: if fewer than 3 unique rotors are supplied.
        ValueError: if any rotor position is outside 1..26.
    """
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Turn a plugboard string like ``"AB CD"`` into a symmetric mapping.

    Each pair of letters is swapped in both directions; an empty string means
    no plugboard. Raises on non-string input, odd pair counts, symbols outside
    the alphabet, and duplicates.
    """
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    # Fix: the original called .replace() without keeping the result, so
    # spaces would later fail the alphabet check. Keep the stripped copy.
    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl: set[str] = set()
    for symbol in pbstring:
        if symbol not in abc:
            raise Exception(f"'{symbol}' not in list of symbols")
        elif symbol in tmppbl:
            raise Exception(f"Duplicate symbol ({symbol})")
        else:
            tmppbl.add(symbol)
    del tmppbl

    # Create the symmetric pair dictionary
    pb: dict[str, str] = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt/decrypt ``text`` with the given rotor settings and plugboard.

    Non-alphabet symbols pass through unchanged. Running the output back
    through with identical settings returns the (upper-cased) input.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rot1, rot2, rot3 = rotor_selection
    # Positions are 1-based for the caller, 0-based internally.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor r1 --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rot1[index % len(abc)]
            # rotor r2 --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rot2[index % len(abc)]
            # rotor r3 --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rot3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (signal travels back through the rotors)
            symbol = abc[rot3.index(symbol) - rotorpos3]
            symbol = abc[rot2.index(symbol) - rotorpos2]
            symbol = abc[rot1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style carry)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
720
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure; optional entries below are only registered when the
# backing dependency (sentencepiece / tokenizers / torch) is installed.
_import_structure = {
    "configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
75
0
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """Agent tool that summarizes an English text with a BART-CNN-samsum checkpoint.

    The three hooks below implement the PipelineTool contract:
    encode (text -> model inputs), forward (inputs -> generated ids),
    decode (ids -> summary string).
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        # Truncate over-long inputs so they fit the model's context window.
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        # generate() returns a batch; this tool only ever produces one sequence.
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
721
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (expects array-like inputs)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Return accuracy and F1 (averaging mode per ``f1_avg``)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: per-question exact match and macro F1, plus global F1.

    ``ids_preds`` items carry ``idx.paragraph``/``idx.question`` used to group
    answers belonging to the same question.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # Exact match: every answer of this question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        # record/multirc carry structured predictions; everything else is flat labels.
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
75
0
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining prime candidates are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Raises:
        TypeError: if ``nth`` is not an int and not castable to int.
        ValueError: if ``nth`` is less than 1.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
700
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    """Configuration for a DETA (Detection Transformer with Assignment) model.

    Stores encoder/decoder dimensions, deformable-attention settings, the
    nested backbone config, and Hungarian-matcher / loss coefficients.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        # Backbone can arrive as None (use default ResNet), a dict (deserialize
        # through the registered config class), or an already-built config.
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
75
0
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (expects array-like inputs)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Return accuracy and F1 (averaging mode per ``f1_avg``)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: per-question exact match and macro F1, plus global F1.

    ``ids_preds`` items carry ``idx.paragraph``/``idx.question`` used to group
    answers belonging to the same question.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # Exact match: every answer of this question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        # record/multirc carry structured predictions; everything else is flat labels.
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
701
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


# Checkpoint name -> hosted config URL for the public ALBERT models.
__A : int = {
    'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
    'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
    'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
    'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
    'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
    'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
    'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
    'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for ALBERT models.

    Fixes applied:
    * ``__init__`` declared every parameter under one placeholder name (a
      SyntaxError: duplicate arguments); parameter names are restored from the
      attributes the body assigns.
    * Every value was assigned to a single throwaway local, so nothing was
      ever stored; values are now stored on ``self``.
    * The undefined base-class name is replaced with ``PretrainedConfig``,
      which is imported above, and the class-attribute placeholder with the
      ``model_type`` attribute that ``PretrainedConfig`` subclasses declare.
    """

    # Identifier used by the auto-config machinery.
    model_type = "albert"

    def __init__(
        self,
        vocab_size=3_0000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=1_6384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        """Store the model hyper-parameters; special-token ids and any extra
        kwargs are forwarded to ``PretrainedConfig``."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class _SCREAMING_SNAKE_CASE(OnnxConfig):
    """ONNX export configuration for ALBERT.

    NOTE(review): this class reuses the name of the config class above and
    therefore shadows it at module level; the importing code is not visible
    from this chunk, so the class names are left untouched.

    Fixes applied: the ``inputs`` property (the hook ``OnnxConfig`` declares)
    was misnamed, and its body assigned the axis mapping to a throwaway local
    while reading the undefined name ``dynamic_axis``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic input axes for export: batch/sequence, plus a choice axis
        for multiple-choice tasks."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
75
0
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class _SCREAMING_SNAKE_CASE(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the Funnel tokenizer (slow and fast variants).

    Fixes applied:
    * The four mixin configuration attributes all shared one placeholder name,
      so only the last survived; restored to the attribute names the
      ``TokenizerTesterMixin`` contract reads.
    * Every method was named ``_snake_case`` (shadowing each other and hiding
      the ``setUp``/``test_*`` hooks from unittest); renamed to the hooks the
      bodies implement.
    * ``vocab_tokens`` / ``self.vocab_file`` were assigned to a throwaway
      local and then referenced by their intended names (NameError); the
      undefined base class is replaced with the imported ``TokenizerTesterMixin``.
    * ``get_tokenizers(do_lower_case=...)`` was passed an undefined name;
      replaced with ``False`` (Funnel vocab here is case-sensitive).
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a small WordPiece vocab file for the tests."""
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer built from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer built from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Sample raw text and its expected detokenized form."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the known vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        """Funnel uses token type 2 for the leading [CLS] position."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
702
from collections import deque
from math import floor
from random import random
from time import time


# NOTE(review): this file is obfuscation-damaged and documented as-is:
#  * several methods declare two or more parameters with the same placeholder
#    name `_SCREAMING_SNAKE_CASE`, which is a SyntaxError in Python;
#  * most assignments target one throwaway local `lowerCamelCase_` while the
#    following statements read the intended names (`self.graph`, `stack`,
#    `visited`, `s`, `ss`, ...), so even with distinct parameters the bodies
#    would raise NameError.  The code is left byte-identical pending
#    reconstruction of the intended identifiers.
class _SCREAMING_SNAKE_CASE :
    # Directed, weighted graph; adjacency is presumably {u: [[w, v], ...]} —
    # TODO confirm once the obfuscated assignments are restored.

    def __init__( self )-> None:
        # Intended: initialize the empty adjacency mapping.
        lowerCamelCase_ ={}

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[Any]:
        # add_pair: add edge u -> v with weight w, avoiding duplicates.
        if self.graph.get(_SCREAMING_SNAKE_CASE ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            lowerCamelCase_ =[[w, v]]
        if not self.graph.get(_SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =[]

    def _snake_case ( self )-> str:
        # all_nodes: list of vertices.
        return list(self.graph )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
        # remove_pair: delete the edge u -> v if present.
        if self.graph.get(_SCREAMING_SNAKE_CASE ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> Optional[Any]:
        # dfs: iterative depth-first search from s; stops early if d is found.
        if s == d:
            return []
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        if s == -2:
            lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(_SCREAMING_SNAKE_CASE )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            lowerCamelCase_ =node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return visited

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]:
        # fill_graph_randomly: random edges; c bounds the vertex labels.
        if c == -1:
            lowerCamelCase_ =floor(random() * 1_0000 ) + 10
        for i in range(_SCREAMING_SNAKE_CASE ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                lowerCamelCase_ =floor(random() * c ) + 1
                if n != i:
                    self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Any:
        # bfs: breadth-first traversal from s using a deque.
        lowerCamelCase_ =deque()
        lowerCamelCase_ =[]
        if s == -2:
            lowerCamelCase_ =list(self.graph )[0]
        d.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        while d:
            lowerCamelCase_ =d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
        # in_degree: count edges ending at u.
        lowerCamelCase_ =0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
        # out_degree: number of edges leaving u.
        return len(self.graph[u] )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Union[str, Any]:
        # topological_sort: DFS-based; nodes appended as they are exhausted.
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        if s == -2:
            lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =s
        lowerCamelCase_ =[]
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        lowerCamelCase_ =node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return sorted_nodes

    def _snake_case ( self )-> str:
        # cycle_nodes: collect vertices that take part in a cycle.
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =-2
        lowerCamelCase_ =[]
        lowerCamelCase_ =s
        lowerCamelCase_ =False
        lowerCamelCase_ =set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        lowerCamelCase_ =node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                lowerCamelCase_ =True
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =False
                indirect_parents.append(_SCREAMING_SNAKE_CASE )
                lowerCamelCase_ =s
                lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return list(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Tuple:
        # has_cycle: like cycle_nodes but returns True on the first back edge.
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =-2
        lowerCamelCase_ =[]
        lowerCamelCase_ =s
        lowerCamelCase_ =False
        lowerCamelCase_ =set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        lowerCamelCase_ =node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                lowerCamelCase_ =True
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =False
                indirect_parents.append(_SCREAMING_SNAKE_CASE )
                lowerCamelCase_ =s
                lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return False

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> float:
        # dfs_time: wall-clock duration of a DFS run.
        lowerCamelCase_ =time()
        self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =time()
        return end - begin

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> float:
        # bfs_time: wall-clock duration of a BFS run.
        lowerCamelCase_ =time()
        self.bfs(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =time()
        return end - begin


class _SCREAMING_SNAKE_CASE :
    # Undirected variant: every add/remove mirrors the edge both ways.
    # NOTE(review): same obfuscation damage as the class above; also shadows
    # the directed class at module level because both share one name.

    def __init__( self )-> None:
        # Intended: initialize the empty adjacency mapping.
        lowerCamelCase_ ={}

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[str]:
        # add_pair: undirected edge u <-> v with weight w.
        # check if the u exists
        if self.graph.get(_SCREAMING_SNAKE_CASE ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            lowerCamelCase_ =[[w, v]]
        # add the other way
        if self.graph.get(_SCREAMING_SNAKE_CASE ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            lowerCamelCase_ =[[w, u]]

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
        # remove_pair: delete both directions of edge u <-> v.
        if self.graph.get(_SCREAMING_SNAKE_CASE ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_SCREAMING_SNAKE_CASE )
        # the other way round
        if self.graph.get(_SCREAMING_SNAKE_CASE ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> int:
        # dfs: same iterative search as the directed class.
        if s == d:
            return []
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        if s == -2:
            lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(_SCREAMING_SNAKE_CASE )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            lowerCamelCase_ =node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return visited

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]:
        # fill_graph_randomly, undirected.
        if c == -1:
            lowerCamelCase_ =floor(random() * 1_0000 ) + 10
        for i in range(_SCREAMING_SNAKE_CASE ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                lowerCamelCase_ =floor(random() * c ) + 1
                if n != i:
                    self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]:
        # bfs: breadth-first traversal from s.
        lowerCamelCase_ =deque()
        lowerCamelCase_ =[]
        if s == -2:
            lowerCamelCase_ =list(self.graph )[0]
        d.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        while d:
            lowerCamelCase_ =d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
        # degree: number of edges incident to u.
        return len(self.graph[u] )

    def _snake_case ( self )-> Any:
        # cycle_nodes: vertices taking part in a cycle.
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =-2
        lowerCamelCase_ =[]
        lowerCamelCase_ =s
        lowerCamelCase_ =False
        lowerCamelCase_ =set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        lowerCamelCase_ =node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                lowerCamelCase_ =True
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =False
                indirect_parents.append(_SCREAMING_SNAKE_CASE )
                lowerCamelCase_ =s
                lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return list(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Any:
        # has_cycle: True on the first back edge encountered.
        lowerCamelCase_ =[]
        lowerCamelCase_ =[]
        lowerCamelCase_ =list(self.graph )[0]
        stack.append(_SCREAMING_SNAKE_CASE )
        visited.append(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =-2
        lowerCamelCase_ =[]
        lowerCamelCase_ =s
        lowerCamelCase_ =False
        lowerCamelCase_ =set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                lowerCamelCase_ =s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        lowerCamelCase_ =node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                lowerCamelCase_ =True
                if len(_SCREAMING_SNAKE_CASE ) != 0:
                    lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
                else:
                    lowerCamelCase_ =False
                indirect_parents.append(_SCREAMING_SNAKE_CASE )
                lowerCamelCase_ =s
                lowerCamelCase_ =ss
            # check if we have reached the starting point
            if len(_SCREAMING_SNAKE_CASE ) == 0:
                return False

    def _snake_case ( self )-> Optional[Any]:
        # all_nodes: list of vertices.
        return list(self.graph )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> float:
        # dfs_time: wall-clock duration of a DFS run.
        lowerCamelCase_ =time()
        self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =time()
        return end - begin

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> float:
        # bfs_time: wall-clock duration of a BFS run.
        lowerCamelCase_ =time()
        self.bfs(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =time()
        return end - begin
75
0
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class _SCREAMING_SNAKE_CASE(TaskTemplate):
    """Task template for single-label text classification.

    Fixes applied:
    * ``frozen=UpperCamelCase_`` (decorator argument) and the base class name
      ``UpperCamelCase_`` were undefined; replaced with ``frozen=True`` and the
      imported ``TaskTemplate``.
    * The five dataclass attributes all shared one placeholder name; restored
      to the field names the method bodies and ``label_schema`` usage require.
    * ``isinstance(..., __A)`` used an undefined name; the check is against
      ``ClassLabel``, the type the adjacent error message names.
    * The alignment method's clobbered locals are given distinct names so the
      updated template is actually built and returned, and it is renamed to
      the ``align_with_features`` hook its body implements.
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        ``ClassLabel`` feature found in *features*.

        Raises ``ValueError`` if the label column is missing or is not a
        ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        # The dataclass is frozen, so mutate a deep copy via __dict__.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
703
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never auto-staled/closed.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'enhancement',
    'new pipeline/model',
    'new scheduler',
    'wip',
]
# Kept for backward compatibility with the original (obfuscated) binding; the
# bogus `Optional[int]` annotation is dropped because `typing` is not imported.
__A = LABELS_TO_EXEMPT


def __UpperCamelCase() -> None:
    """Stale-bot pass over open huggingface/diffusers issues.

    Closes issues that stayed inactive for 7 days after a Stalebot notice,
    un-stales issues a human commented on, and posts the stale notice after
    23 days of inactivity.  Requires a ``GITHUB_TOKEN`` environment variable.

    Fixes applied: the comment-sorting lambda's parameter did not match the
    name its body used, ``len(...)`` was called on an undefined name instead
    of ``comments``, the exemption list was bound to a name the body never
    read, and the ``-> Dict`` annotation referenced an unimported name.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # NOTE(review): `get_labels()` yields Label objects, so this string
            # membership test looks always-false — confirm against PyGithub.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored."""
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    # BUGFIX: the guard previously called `main()`, which is never defined.
    __UpperCamelCase()
75
0
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename

# NOTE(review): obfuscation damage, documented as-is: the three module
# constants all bind to `__A` (the bodies read URL / CONTENT / HASH, which are
# undefined), every test function shares the name `SCREAMING_SNAKE_CASE_`
# (later defs shadow earlier ones), several defs declare duplicate `_A`
# parameters (a SyntaxError), and most locals are assigned to one throwaway
# name `lowerCamelCase_` while later statements read the intended names.
__A : Any = "http://www.mocksite.com/file1.txt"
__A : Dict = "\"text\": [\"foo\", \"foo\"]"
__A : Optional[Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class _SCREAMING_SNAKE_CASE :
    # Minimal stand-in for a `requests` response object.
    _UpperCamelCase:Optional[Any] = 2_00
    _UpperCamelCase:Any = {"Content-Length": "100"}
    _UpperCamelCase:Dict = {}

    def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> str:
        # Intended: iter_content — yield the payload as one bytes chunk.
        return [bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )]


def SCREAMING_SNAKE_CASE_ ( *_A : Dict , **_A : int ) ->List[Any]:
    """Monkeypatch target standing in for ``requests.request``."""
    # NOTE(review): `MockResponse` is undefined — the class above lost its name.
    return MockResponse()


@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def SCREAMING_SNAKE_CASE_ ( _A : Union[str, Any] , _A : Any , _A : List[str] ) ->Any:
    """Download via DownloadManager and check cache layout, content and
    sidecar metadata for str/list/dict URL inputs."""
    import requests

    monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
    lowerCamelCase_ =URL
    if issubclass(_lowerCamelCase , _lowerCamelCase ):
        lowerCamelCase_ =url
    elif issubclass(_lowerCamelCase , _lowerCamelCase ):
        lowerCamelCase_ =[url]
    elif issubclass(_lowerCamelCase , _lowerCamelCase ):
        lowerCamelCase_ ={"train": url}
    lowerCamelCase_ ="dummy"
    lowerCamelCase_ ="downloads"
    lowerCamelCase_ =tmp_path
    lowerCamelCase_ =DownloadConfig(
        cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
    lowerCamelCase_ =DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
    lowerCamelCase_ =dl_manager.download(_lowerCamelCase )
    lowerCamelCase_ =urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            lowerCamelCase_ =[downloaded_paths]
            lowerCamelCase_ =[urls]
        elif isinstance(_lowerCamelCase , _lowerCamelCase ):
            assert "train" in downloaded_paths.keys()
            lowerCamelCase_ =downloaded_paths.values()
            lowerCamelCase_ =urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            lowerCamelCase_ =Path(_lowerCamelCase )
            lowerCamelCase_ =downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            lowerCamelCase_ =downloaded_path.read_text()
            assert content == CONTENT
            lowerCamelCase_ =downloaded_path.with_suffix(""".json""" )
            assert metadata_downloaded_path.exists()
            lowerCamelCase_ =json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def SCREAMING_SNAKE_CASE_ ( _A : Any , _A : int , _A : Any ) ->Dict:
    """Extract an .xz fixture via DownloadManager and check the extraction
    cache layout and the extracted content."""
    lowerCamelCase_ =str(_lowerCamelCase )
    if issubclass(_lowerCamelCase , _lowerCamelCase ):
        lowerCamelCase_ =filename
    elif issubclass(_lowerCamelCase , _lowerCamelCase ):
        lowerCamelCase_ =[filename]
    elif issubclass(_lowerCamelCase , _lowerCamelCase ):
        lowerCamelCase_ ={"train": filename}
    lowerCamelCase_ ="dummy"
    lowerCamelCase_ =xz_file.parent
    lowerCamelCase_ ="extracted"
    lowerCamelCase_ =DownloadConfig(
        cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
    lowerCamelCase_ =DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
    lowerCamelCase_ =dl_manager.extract(_lowerCamelCase )
    lowerCamelCase_ =paths
    for extracted_paths in [extracted_paths]:
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            lowerCamelCase_ =[extracted_paths]
            lowerCamelCase_ =[paths]
        elif isinstance(_lowerCamelCase , _lowerCamelCase ):
            assert "train" in extracted_paths.keys()
            lowerCamelCase_ =extracted_paths.values()
            lowerCamelCase_ =paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            lowerCamelCase_ =Path(_lowerCamelCase )
            lowerCamelCase_ =extracted_path.parts
            assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            lowerCamelCase_ =extracted_path.read_text()
            lowerCamelCase_ =text_file.read_text()
            assert extracted_file_content == expected_file_content


def SCREAMING_SNAKE_CASE_ ( _A : Any , _A : List[Any] ) ->List[str]:
    """Helper: assert *file* is a 4-item JSONL stream with the expected keys."""
    assert path.endswith(""".jsonl""" )
    for num_items, line in enumerate(_lowerCamelCase , start=1 ):
        lowerCamelCase_ =json.loads(line.decode("""utf-8""" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def SCREAMING_SNAKE_CASE_ ( _A : Tuple , _A : str ) ->List[str]:
    """iter_archive over a flat archive yields each member as (path, file)."""
    lowerCamelCase_ =request.getfixturevalue(_lowerCamelCase )
    lowerCamelCase_ =DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
        _test_jsonl(_lowerCamelCase , _lowerCamelCase )
    assert num_jsonl == 2


@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def SCREAMING_SNAKE_CASE_ ( _A : Optional[int] , _A : int ) ->int:
    """iter_archive can be nested for archives-within-archives."""
    lowerCamelCase_ =request.getfixturevalue(_lowerCamelCase )
    lowerCamelCase_ =DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
            _test_jsonl(_lowerCamelCase , _lowerCamelCase )
    assert num_tar == 1
    assert num_jsonl == 2


def SCREAMING_SNAKE_CASE_ ( _A : int ) ->List[str]:
    """iter_files yields the individual files of a directory fixture."""
    lowerCamelCase_ =DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
        assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
704
import argparse
import os
import re

# NOTE(review): obfuscation damage, documented as-is: the path constant and
# all five compiled regexes bind to `__A` (the bodies read `_re_indent`,
# `_re_strip_line`, `_re_bracket_content`, `_re_direct_key`,
# `_re_indirect_key`, which are undefined); every function shares the name
# `__UpperCamelCase` (later defs shadow earlier ones, and the `__main__` guard
# calls the undefined `sort_imports_in_all_inits`); several defs declare
# duplicate `_A` parameters (a SyntaxError); and many locals are assigned to
# one throwaway name `lowerCamelCase_` while later statements read the
# intended names.
__A : Optional[Any] = 'src/diffusers'
# Pattern that looks at the indentation in a line.
__A : int = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
__A : Dict = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A : Optional[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
__A : int = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A : Optional[Any] = re.compile(R'\[([^\]]+)\]')


def __UpperCamelCase ( _A : int ) ->Dict:
    """Return the leading whitespace of a line ('' when the line is blank)."""
    lowerCamelCase_ =_re_indent.search(_A )
    return "" if search is None else search.groups()[0]


def __UpperCamelCase ( _A : Optional[Any] , _A : Optional[int]="" , _A : int=None , _A : List[str]=None ) ->List[Any]:
    """Split *code* into blocks of the given indentation level, optionally
    bounded by start/end prompt lines."""
    lowerCamelCase_ =0
    lowerCamelCase_ =code.split("""\n""" )
    if start_prompt is not None:
        while not lines[index].startswith(_A ):
            index += 1
        lowerCamelCase_ =["""\n""".join(lines[:index] )]
    else:
        lowerCamelCase_ =[]
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    lowerCamelCase_ =[lines[index]]
    index += 1
    while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
                current_block.append(lines[index] )
                blocks.append("""\n""".join(_A ) )
                if index < len(_A ) - 1:
                    lowerCamelCase_ =[lines[index + 1]]
                    index += 1
                else:
                    lowerCamelCase_ =[]
            else:
                blocks.append("""\n""".join(_A ) )
                lowerCamelCase_ =[lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(_A ) > 0:
        blocks.append("""\n""".join(_A ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(_A ):
        blocks.append("""\n""".join(lines[index:] ) )
    return blocks


def __UpperCamelCase ( _A : Optional[int] ) ->Optional[int]:
    """Wrap *key* so sorting ignores case and underscores."""
    def _inner(_A : Optional[Any] ):
        return key(_A ).lower().replace("""_""" , """""" )

    return _inner


def __UpperCamelCase ( _A : int , _A : List[Any]=None ) ->List[str]:
    """Sort import names: CONSTANTS first, then Classes, then functions."""
    # If no key is provided, we use a noop.
    def noop(_A : List[str] ):
        return x

    if key is None:
        lowerCamelCase_ =noop
    # Constants are all uppercase, they go first.
    lowerCamelCase_ =[obj for obj in objects if key(_A ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    lowerCamelCase_ =[obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
    # Functions begin with a lowercase, they go last.
    lowerCamelCase_ =[obj for obj in objects if not key(_A )[0].isupper()]
    lowerCamelCase_ =ignore_underscore(_A )
    return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )


def __UpperCamelCase ( _A : List[str] ) ->List[str]:
    """Sort the names inside one `_import_structure` statement (handles the
    one-line, three-line and many-line bracket layouts)."""
    # This inner function sort imports between [ ].
    def _replace(_A : Optional[Any] ):
        lowerCamelCase_ =match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        lowerCamelCase_ =[part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            lowerCamelCase_ =keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"

    lowerCamelCase_ =import_statement.split("""\n""" )
    if len(_A ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        lowerCamelCase_ =2 if lines[1].strip() == """[""" else 1
        lowerCamelCase_ =[(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCamelCase_ =sort_objects(_A , key=lambda _A : x[1] )
        lowerCamelCase_ =[lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(_A ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lowerCamelCase_ =_re_bracket_content.sub(_replace , lines[1] )
        else:
            lowerCamelCase_ =[part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                lowerCamelCase_ =keys[:-1]
            lowerCamelCase_ =get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_A )] )
        return "\n".join(_A )
    else:
        # Finally we have to deal with imports fitting on one line
        lowerCamelCase_ =_re_bracket_content.sub(_replace , _A )
        return import_statement


def __UpperCamelCase ( _A : List[Any] , _A : Optional[Any]=True ) ->str:
    """Sort the `_import_structure` section of one __init__.py; with
    check_only, report instead of rewriting."""
    with open(_A , """r""" ) as f:
        lowerCamelCase_ =f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    lowerCamelCase_ =split_code_in_indented_blocks(
        _A , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(_A ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        lowerCamelCase_ =main_blocks[block_idx]
        lowerCamelCase_ =block.split("""\n""" )
        # Get to the start of the imports.
        lowerCamelCase_ =0
        while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                lowerCamelCase_ =len(_A )
            else:
                line_idx += 1
        if line_idx >= len(_A ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        lowerCamelCase_ ="""\n""".join(block_lines[line_idx:-1] )
        lowerCamelCase_ =get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        lowerCamelCase_ =split_code_in_indented_blocks(_A , indent_level=_A )
        # We have two categories of import key: list or _import_structure[key].append/extend
        lowerCamelCase_ =_re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        lowerCamelCase_ =[(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        lowerCamelCase_ =[(i, key) for i, key in enumerate(_A ) if key is not None]
        lowerCamelCase_ =[x[0] for x in sorted(_A , key=lambda _A : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        lowerCamelCase_ =0
        lowerCamelCase_ =[]
        for i in range(len(_A ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                lowerCamelCase_ =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(_A )
                count += 1
        # And we put our main block back together with its first and last line.
        lowerCamelCase_ ="""\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(_A ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(_A , """w""" ) as f:
                f.write("""\n""".join(_A ) )


def __UpperCamelCase ( _A : str=True ) ->List[Any]:
    """Walk the tree and apply the sorter to every package __init__.py."""
    lowerCamelCase_ =[]
    for root, _, files in os.walk(_A ):
        if "__init__.py" in files:
            lowerCamelCase_ =sort_imports(os.path.join(_A , """__init__.py""" ) , check_only=_A )
            if result:
                lowerCamelCase_ =[os.path.join(_A , """__init__.py""" )]
    if len(_A ) > 0:
        raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )


if __name__ == "__main__":
    __A : Tuple = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    __A : Optional[Any] = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
75
0
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library).

    WordPiece-based; mirrors the slow ``ElectraTokenizer`` interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it so lowercasing / accent stripping / CJK handling match.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type IDs: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
705
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of pieces back into a single string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask marking special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type IDs: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
75
0
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dims and falling back to dynamic ones."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    # Fully-unknown shape: only the dynamic shape tensor is available.
    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Numerically-stabilized softmax: the tiny additive constant avoids degenerate XLA outputs."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Apply layer normalization over a single `axis`, with 1D `weight`/`bias` (like torch.nn.functional.layer_norm)."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    """Flatten dims `start_dim`..`end_dim` of `input` (TF equivalent of torch.flatten)."""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a (0/1) attention mask into an additive mask (0 for keep, dtype-min for masked)."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that all ids in `tensor` are valid indices for an embedding of size `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save `data` as attribute `name` on an HDF5 `group`, chunking if it exceeds the header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no single item is larger than HDF5_OBJECT_HEADER_LIMIT,
    # because in that case even chunking the array would not make saving possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attribute `name` from an HDF5 `group`, reassembling chunks written by the saver above."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand every rank-1 tf.Tensor in a (possibly nested) structure to rank 2."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
706
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    """Unit tests for the activation-function registry in transformers.activations."""

    def test_gelu_versions(self):
        # The Python gelu matches torch's builtin, but differs from the "new" (tanh) variant.
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        # gelu_10 clips gelu's output at 10; below the clip it must agree with plain gelu.
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        # Every registered name resolves; unknown names or non-strings raise KeyError.
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        # Each get_activation call returns a fresh object: attributes set on one
        # instance must not leak onto another.
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
75
0
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a sympy expression string in `variable`)
    using the Newton-Raphson iteration, starting from `starting_point`.

    `multiplicity` accelerates convergence for roots of known multiplicity.
    Raises ZeroDivisionError when the derivative vanishes at a guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            # Newton step, scaled by the root's multiplicity.
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses.
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
707
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Prompt the user for a compute environment and collect the matching configuration."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` argument parser (stand-alone or as a subcommand)."""
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the interactive prompts and write the resulting config to disk (JSON or YAML by extension)."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    """Entry point: parse CLI args and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
75
0
"""Vigenère cipher: encrypt/decrypt a message with an alphabetic key.

Non-letter characters pass through unchanged and do not advance the key.
"""

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactive driver: read a message, key and mode, then print the result."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt `message` with the Vigenère cipher under `key`."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt `message` with the Vigenère cipher under `key`."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of `message` by the corresponding key letter.

    `mode` is "encrypt" (add the key shift) or "decrypt" (subtract it).
    Case is preserved; non-letters are copied verbatim.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Only letters consume a key character; wrap around at the end of the key.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
708
def encrypt(input_string: str, key: int) -> str:
    """Encrypt `input_string` with the rail-fence (zigzag) cipher of height `key`.

    Raises ValueError for a non-positive key. A key of 1, or a message no
    longer than the key, is returned unchanged.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    return "".join(grid)


def decrypt(input_string: str, key: int) -> str:
    """Reverse the rail-fence cipher: rebuild the zigzag grid and read it back off.

    Raises ValueError for a non-positive key; a key of 1 is the identity.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    # First pass: mark how many characters land on each rail.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    # Second pass: slice the ciphertext into the rails.
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Read the rails back in zigzag order.
    output_string = ""
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt `input_string` with every key from 1 to len-1 and return all candidates."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
0
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string with a ``0o`` prefix.

    Builds the octal digits as a base-10 number (each octal digit occupies
    one decimal place), so the final int() keeps the digit sequence intact.
    """
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print example conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
709
from typing import Any


class Node:
    """Singly-linked-list node holding arbitrary data."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    """Minimal singly linked list supporting push-to-front and swapping by value."""

    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        """Print all node values on one line, space-separated."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert a new node holding `new_data` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the payloads of the first nodes holding `node_data_1` and `node_data_2`.

        No-op when the two values are equal or either value is not found.
        Only the `data` fields are exchanged; the node links stay untouched.
        """
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
75
0
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Whisper feature extractor: pads/truncates raw audio to fixed-length chunks and
    extracts log-mel spectrogram features."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # One chunk is `chunk_length` seconds of audio at `sampling_rate`.
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of one waveform, clipped and scaled as in Whisper."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        # Clip to 8 dB below the max, then rescale to roughly [-1, 1].
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize each array to zero mean / unit variance over its non-padded region."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or more mono waveforms into padded log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance to a dict, dropping the (large, derived) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
710
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Dict = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Optional[Any] = "yolos" def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=[512, 864] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , )-> Tuple: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =image_size lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =qkv_bias lowerCamelCase_ =num_detection_tokens lowerCamelCase_ =use_mid_position_embeddings lowerCamelCase_ =auxiliary_loss # Hungarian matcher lowerCamelCase_ =class_cost lowerCamelCase_ =bbox_cost lowerCamelCase_ =giou_cost # Loss coefficients lowerCamelCase_ =bbox_loss_coefficient lowerCamelCase_ =giou_loss_coefficient lowerCamelCase_ 
=eos_coefficient class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Optional[Any] = version.parse("1.11") @property def _snake_case ( self )-> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _snake_case ( self )-> float: return 1E-4 @property def _snake_case ( self )-> int: return 12
75
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Any = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'WavLMForAudioFrameClassification', 'WavLMForCTC', 'WavLMForSequenceClassification', 'WavLMForXVector', 'WavLMModel', 'WavLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
711
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py __A : List[Any] = 'src/transformers' __A : Tuple = 'docs/source/en' __A : Optional[int] = '.' def __UpperCamelCase ( _A : Tuple , _A : Tuple , _A : Optional[Any] ) ->Optional[Any]: """simple docstring""" with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase_ =f.readlines() # Find the start prompt. lowerCamelCase_ =0 while not lines[start_index].startswith(_A ): start_index += 1 start_index += 1 lowerCamelCase_ =start_index while not lines[end_index].startswith(_A ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | __A : Dict = 'Model|Encoder|Decoder|ForConditionalGeneration' # Regexes that match TF/Flax/PT model names. __A : Optional[int] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') __A : Optional[int] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __A : str = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # This is to make sure the transformers module imported is the one in the repo. 
__A : List[Any] = direct_transformers_import(TRANSFORMERS_PATH) def __UpperCamelCase ( _A : List[Any] ) ->str: """simple docstring""" lowerCamelCase_ =re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , _A ) return [m.group(0 ) for m in matches] def __UpperCamelCase ( _A : Union[str, Any] , _A : List[str] ) ->Optional[int]: """simple docstring""" lowerCamelCase_ =2 if text == """✅""" or text == """❌""" else len(_A ) lowerCamelCase_ =(width - text_length) // 2 lowerCamelCase_ =width - text_length - left_indent return " " * left_indent + text + " " * right_indent def __UpperCamelCase ( ) ->Any: """simple docstring""" lowerCamelCase_ =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowerCamelCase_ ={ name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowerCamelCase_ ={name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) # Let's lookup through all transformers object (once). 
for attr_name in dir(_A ): lowerCamelCase_ =None if attr_name.endswith("""Tokenizer""" ): lowerCamelCase_ =slow_tokenizers lowerCamelCase_ =attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowerCamelCase_ =fast_tokenizers lowerCamelCase_ =attr_name[:-13] elif _re_tf_models.match(_A ) is not None: lowerCamelCase_ =tf_models lowerCamelCase_ =_re_tf_models.match(_A ).groups()[0] elif _re_flax_models.match(_A ) is not None: lowerCamelCase_ =flax_models lowerCamelCase_ =_re_flax_models.match(_A ).groups()[0] elif _re_pt_models.match(_A ) is not None: lowerCamelCase_ =pt_models lowerCamelCase_ =_re_pt_models.match(_A ).groups()[0] if lookup_dict is not None: while len(_A ) > 0: if attr_name in model_name_to_prefix.values(): lowerCamelCase_ =True break # Try again after removing the last word in the name lowerCamelCase_ ="""""".join(camel_case_split(_A )[:-1] ) # Let's build that table! lowerCamelCase_ =list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowerCamelCase_ =["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). 
lowerCamelCase_ =[len(_A ) + 2 for c in columns] lowerCamelCase_ =max([len(_A ) for name in model_names] ) + 2 # Build the table per se lowerCamelCase_ ="""|""" + """|""".join([_center_text(_A , _A ) for c, w in zip(_A , _A )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowerCamelCase_ ={True: """✅""", False: """❌"""} for name in model_names: lowerCamelCase_ =model_name_to_prefix[name] lowerCamelCase_ =[ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(_A , _A ) for l, w in zip(_A , _A )] ) + "|\n" return table def __UpperCamelCase ( _A : str=False ) ->Optional[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =_find_text_in_file( filename=os.path.join(_A , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowerCamelCase_ =get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(_A , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": __A : int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __A : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
75
0
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __A : Any = 'pt' elif is_tf_available(): __A : str = 'tf' else: __A : Optional[int] = 'jax' class _SCREAMING_SNAKE_CASE ( UpperCamelCase_ , unittest.TestCase): _UpperCamelCase:List[Any] = PerceiverTokenizer _UpperCamelCase:List[str] = False def _snake_case ( self )-> Dict: super().setUp() lowerCamelCase_ =PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _snake_case ( self )-> Union[str, Any]: return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" ) def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> List[str]: return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=5 )-> Any: lowerCamelCase_ =[] for i in range(len(UpperCamelCase__ ) ): try: lowerCamelCase_ =tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowerCamelCase_ =list(filter(lambda _SCREAMING_SNAKE_CASE : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) ) lowerCamelCase_ =list(filter(lambda _SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) ) if max_length is not None and len(UpperCamelCase__ ) > max_length: lowerCamelCase_ =toks[:max_length] if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0: while len(UpperCamelCase__ ) < min_length: lowerCamelCase_ =toks + toks # toks_str = [t[1] for t in toks] lowerCamelCase_ =[t[0] for t in toks] # Ensure consistency 
lowerCamelCase_ =tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) if " " not in output_txt and len(UpperCamelCase__ ) > 1: lowerCamelCase_ =( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ ) ) if with_prefix_space: lowerCamelCase_ =''' ''' + output_txt lowerCamelCase_ =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) return output_txt, output_ids def _snake_case ( self )-> Tuple: lowerCamelCase_ =self.perceiver_tokenizer lowerCamelCase_ ='''Unicode €.''' lowerCamelCase_ =tokenizer(UpperCamelCase__ ) lowerCamelCase_ =[4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ ) # decoding lowerCamelCase_ =tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , """[CLS]Unicode €.[SEP]""" ) lowerCamelCase_ =tokenizer("""e è é ê ë""" ) lowerCamelCase_ =[4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ ) # decoding lowerCamelCase_ =tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , """[CLS]e è é ê ë[SEP]""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" ) def _snake_case ( self )-> str: lowerCamelCase_ =self.perceiver_tokenizer lowerCamelCase_ =['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off lowerCamelCase_ =[4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on lowerCamelCase_ =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , 
UpperCamelCase__ ) if FRAMEWORK != "jax": lowerCamelCase_ =list(batch.input_ids.numpy()[0] ) else: lowerCamelCase_ =list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.perceiver_tokenizer lowerCamelCase_ =['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowerCamelCase_ =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , UpperCamelCase__ ) self.assertIn("""attention_mask""" , UpperCamelCase__ ) self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ ) def _snake_case ( self )-> Tuple: lowerCamelCase_ =self.perceiver_tokenizer lowerCamelCase_ =[ '''Summary of the text.''', '''Another summary.''', ] lowerCamelCase_ =tokenizer( text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def _snake_case ( self )-> Any: lowerCamelCase_ =self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowerCamelCase_ =self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =''' He is very happy, UNwant\u00E9d,running''' lowerCamelCase_ =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase_ 
=tokenizer.__class__.from_pretrained(UpperCamelCase__ ) lowerCamelCase_ =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) shutil.rmtree(UpperCamelCase__ ) lowerCamelCase_ =self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(["""bim""", """bambam"""] ) lowerCamelCase_ =tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) lowerCamelCase_ =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase_ =tokenizer.__class__.from_pretrained(UpperCamelCase__ ) lowerCamelCase_ =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowerCamelCase_ =tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase__ ) def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ =[] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , 
"""special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: lowerCamelCase_ =json.load(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: lowerCamelCase_ =json.load(UpperCamelCase__ ) lowerCamelCase_ =[f'<extra_id_{i}>' for i in range(125 )] lowerCamelCase_ =added_tokens_extra_ids + [ '''an_additional_special_token''' ] lowerCamelCase_ =added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase__ , UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase__ , UpperCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCamelCase_ =tokenizer_class.from_pretrained( UpperCamelCase__ , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCamelCase_ =added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )] lowerCamelCase_ =tokenizer_class.from_pretrained( UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( 
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def _snake_case ( self )-> Optional[int]: lowerCamelCase_ =self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , """�""" ) def _snake_case ( self )-> int: pass def _snake_case ( self )-> int: pass def _snake_case ( self )-> Optional[Any]: pass def _snake_case ( self )-> Optional[Any]: pass def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ =self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): lowerCamelCase_ =['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] lowerCamelCase_ =tokenizer.convert_tokens_to_string(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
712
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )-> Tuple: lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =image_size lowerCamelCase_ =num_channels lowerCamelCase_ =num_stages lowerCamelCase_ =hidden_sizes lowerCamelCase_ =depths lowerCamelCase_ =is_training lowerCamelCase_ =use_labels lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =out_features lowerCamelCase_ =num_labels lowerCamelCase_ =scope lowerCamelCase_ =num_stages def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, 
self.image_size] ) lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ =self.get_config() return config, pixel_values, labels def _snake_case ( self )-> List[Any]: return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _snake_case ( self )-> Union[str, Any]: return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_SCREAMING_SNAKE_CASE , loss_ignore_index=255 , num_labels=self.num_labels , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]: lowerCamelCase_ =UperNetForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _snake_case ( self )-> str: lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) =config_and_inputs lowerCamelCase_ ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else () _UpperCamelCase:Any = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} _UpperCamelCase:Optional[Any] = False _UpperCamelCase:Dict = False _UpperCamelCase:int = False 
_UpperCamelCase:Any = False _UpperCamelCase:Optional[Any] = False _UpperCamelCase:Optional[Any] = False def _snake_case ( self )-> int: lowerCamelCase_ =UperNetModelTester(self ) lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case ( self )-> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self )-> Tuple: return def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ =[*signature.parameters.keys()] lowerCamelCase_ =["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Tuple: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def _snake_case ( self )-> str: pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def _snake_case ( self )-> str: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _snake_case ( self )-> Optional[Any]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _snake_case ( self )-> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet 
has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _snake_case ( self )-> List[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _snake_case ( self )-> str: pass def _snake_case ( self )-> Optional[int]: def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ =self.model_tester.num_stages self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ =True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =_config_zero_init(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =_config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( 
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def _snake_case ( self )-> Dict: pass @slow def _snake_case ( self )-> Tuple: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( ) ->Tuple: """simple docstring""" lowerCamelCase_ =hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) lowerCamelCase_ =Image.open(_A ).convert("""RGB""" ) return image @require_torch @require_vision @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase): def _snake_case ( self )-> List[Any]: lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =prepare_img() lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) def _snake_case ( self )-> int: lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ 
=prepare_img() lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
75
0
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:List[str] = CustomTokenizer pass
713
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A : Any = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[str, Any] = ["pixel_values"] def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **_SCREAMING_SNAKE_CASE , )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =size if size is not None else {"""shortest_edge""": 224} lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =resample lowerCamelCase_ =do_center_crop lowerCamelCase_ =crop_size lowerCamelCase_ =do_rescale lowerCamelCase_ =rescale_factor lowerCamelCase_ =do_normalize lowerCamelCase_ =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ =image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , 
_SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ =int((256 / 224) * size["""shortest_edge"""] ) lowerCamelCase_ =get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' ) return resize( _SCREAMING_SNAKE_CASE , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , )-> BatchFeature: lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize lowerCamelCase_ =resample if resample is not None else self.resample lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean lowerCamelCase_ =image_std if image_std is not None else self.image_std lowerCamelCase_ =size if size is not None else self.size lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , 
default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) lowerCamelCase_ =make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase_ =[to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: lowerCamelCase_ =[self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: lowerCamelCase_ =[self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowerCamelCase_ =[self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: lowerCamelCase_ =[self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCamelCase_ =[to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCamelCase_ ={"""pixel_values""": images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
75
0
def __UpperCamelCase ( _A : Optional[Any] , _A : Optional[int] ) ->int: """simple docstring""" lowerCamelCase_ =len(_A ) lowerCamelCase_ =len(_A ) lowerCamelCase_ =[[False for _ in range(m + 1 )] for _ in range(n + 1 )] lowerCamelCase_ =True for i in range(_A ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: lowerCamelCase_ =True if a[i].islower(): lowerCamelCase_ =True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
714
# Imports import numpy as np class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any: self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: if red is not None: lowerCamelCase_ =red if green is not None: lowerCamelCase_ =green if blue is not None: lowerCamelCase_ =blue if red_edge is not None: lowerCamelCase_ =red_edge if nir is not None: lowerCamelCase_ =nir return True def _snake_case ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={ """ARVI2""": self.arvaa, """CCCI""": self.ccci, """CVI""": self.cvi, """GLI""": self.gli, """NDVI""": self.ndvi, """BNDVI""": self.bndvi, """redEdgeNDVI""": self.red_edge_ndvi, """GNDVI""": self.gndvi, """GBNDVI""": self.gbndvi, """GRNDVI""": self.grndvi, """RBNDVI""": self.rbndvi, """PNDVI""": self.pndvi, """ATSAVI""": self.atsavi, """BWDRVI""": self.bwdrvi, """CIgreen""": self.ci_green, """CIrededge""": self.ci_rededge, """CI""": self.ci, """CTVI""": self.ctvi, """GDVI""": self.gdvi, """EVI""": self.evi, """GEMI""": self.gemi, """GOSAVI""": self.gosavi, """GSAVI""": self.gsavi, """Hue""": self.hue, """IVI""": self.ivi, """IPVI""": self.ipvi, """I""": self.i, """RVI""": self.rvi, """MRVI""": self.mrvi, """MSAVI""": self.m_savi, """NormG""": self.norm_g, 
"""NormNIR""": self.norm_nir, """NormR""": self.norm_r, """NGRDI""": self.ngrdi, """RI""": self.ri, """S""": self.s, """IF""": self._if, """DVI""": self.dvi, """TVI""": self.tvi, """NDRE""": self.ndre, } try: return funcs[index]() except KeyError: print("""Index not in the list!""" ) return False def _snake_case ( self )-> Optional[Any]: return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case ( self )-> Tuple: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case ( self )-> str: return self.nir * (self.red / (self.green**2)) def _snake_case ( self )-> Optional[int]: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case ( self )-> Tuple: return (self.nir - self.red) / (self.nir + self.red) def _snake_case ( self )-> Dict: return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case ( self )-> List[Any]: return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case ( self )-> Tuple: return (self.nir - self.green) / (self.nir + self.green) def _snake_case ( self )-> Optional[int]: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case ( self )-> List[str]: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case ( self )-> List[str]: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case ( self )-> Optional[int]: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.0_8 , _SCREAMING_SNAKE_CASE=1.2_2 , _SCREAMING_SNAKE_CASE=0.0_3 )-> Any: return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case ( self )-> Tuple: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case 
( self )-> Any: return (self.nir / self.green) - 1 def _snake_case ( self )-> Union[str, Any]: return (self.nir / self.redEdge) - 1 def _snake_case ( self )-> Union[str, Any]: return (self.red - self.blue) / self.red def _snake_case ( self )-> Dict: lowerCamelCase_ =self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case ( self )-> int: return self.nir - self.green def _snake_case ( self )-> Dict: return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case ( self )-> List[str]: lowerCamelCase_ =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.1_6 )-> List[Any]: return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.5 )-> Dict: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case ( self )-> int: return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: return (self.nir - b) / (a * self.red) def _snake_case ( self )-> int: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case ( self )-> Optional[Any]: return (self.red + self.green + self.blue) / 3_0.5 def _snake_case ( self )-> List[str]: return self.nir / self.red def _snake_case ( self )-> List[str]: return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case ( self )-> str: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case ( self )-> List[Any]: return self.green / (self.nir + self.red + self.green) def _snake_case ( self )-> Dict: return self.nir / (self.nir + self.red + self.green) def _snake_case ( self )-> List[str]: return self.red / 
(self.nir + self.red + self.green) def _snake_case ( self )-> int: return (self.green - self.red) / (self.green + self.red) def _snake_case ( self )-> str: return (self.red - self.green) / (self.red + self.green) def _snake_case ( self )-> str: lowerCamelCase_ =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCamelCase_ =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case ( self )-> List[str]: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case ( self )-> List[Any]: return self.nir / self.red def _snake_case ( self )-> Optional[int]: return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case ( self )-> str: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCamelCase ( _A : int ) ->Optional[int]: """simple docstring""" lowerCamelCase_ =[2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2] lowerCamelCase_ =True if """large""" in model_name or """huge""" in model_name else False lowerCamelCase_ =True if """large""" in model_name or """huge""" in model_name else False lowerCamelCase_ =True if """large""" in model_name or """huge""" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowerCamelCase_ =[3, 3, 3, 3] lowerCamelCase_ =[5, 5, 5, 5] elif "fl4" in model_name: lowerCamelCase_ =[4, 4, 4, 4] lowerCamelCase_ =[3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowerCamelCase_ =[3, 3, 3, 3] if "lrf" in model_name: lowerCamelCase_ =[3, 3, 3, 3] else: lowerCamelCase_ =[2, 2, 2, 2] if "tiny" in model_name: lowerCamelCase_ =96 elif "small" in model_name: lowerCamelCase_ =96 elif "base" in model_name: lowerCamelCase_ =128 elif "large" in model_name: lowerCamelCase_ =192 elif "xlarge" in model_name: lowerCamelCase_ =256 elif "huge" in model_name: lowerCamelCase_ =352 # set label information lowerCamelCase_ ="""huggingface/label-files""" if "large" in model_name or "huge" in model_name: lowerCamelCase_ ="""imagenet-22k-id2label.json""" else: lowerCamelCase_ ="""imagenet-1k-id2label.json""" lowerCamelCase_ =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase_ ={int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowerCamelCase_ ={v: k for k, v in idalabel.items()} 
lowerCamelCase_ =FocalNetConfig( embed_dim=SCREAMING_SNAKE_CASE_ , depths=SCREAMING_SNAKE_CASE_ , focal_levels=SCREAMING_SNAKE_CASE_ , focal_windows=SCREAMING_SNAKE_CASE_ , use_conv_embed=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , use_post_layernorm=SCREAMING_SNAKE_CASE_ , use_layerscale=SCREAMING_SNAKE_CASE_ , ) return config def __UpperCamelCase ( _A : List[Any] ) ->int: """simple docstring""" if "patch_embed.proj" in name: lowerCamelCase_ =name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowerCamelCase_ =name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: lowerCamelCase_ ="""encoder.""" + name if "encoder.layers" in name: lowerCamelCase_ =name.replace("""encoder.layers""" , """encoder.stages""" ) if "downsample.proj" in name: lowerCamelCase_ =name.replace("""downsample.proj""" , """downsample.projection""" ) if "blocks" in name: lowerCamelCase_ =name.replace("""blocks""" , """layers""" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowerCamelCase_ =name.replace("""modulation.f""" , """modulation.projection_in""" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowerCamelCase_ =name.replace("""modulation.h""" , """modulation.projection_context""" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowerCamelCase_ =name.replace("""modulation.proj""" , """modulation.projection_out""" ) if name == "norm.weight": lowerCamelCase_ ="""layernorm.weight""" if name == "norm.bias": lowerCamelCase_ ="""layernorm.bias""" if "head" in name: lowerCamelCase_ =name.replace("""head""" , """classifier""" ) else: lowerCamelCase_ ="""focalnet.""" + name return name def __UpperCamelCase ( _A : str , _A : Union[str, Any] , _A : Tuple=False ) ->List[Any]: """simple docstring""" # fmt: off lowerCamelCase_ ={ """focalnet-tiny""": 
"""https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""", """focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""", """focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""", """focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""", """focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""", """focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""", """focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""", """focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""", """focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""", """focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""", } # fmt: on lowerCamelCase_ =model_name_to_url[model_name] print("""Checkpoint URL: """ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] # rename keys for key in state_dict.copy().keys(): lowerCamelCase_ =state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ =val lowerCamelCase_ =get_focalnet_config(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ =FocalNetForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() # load state dict model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # verify conversion lowerCamelCase_ ="""http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase_ 
=BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE_ , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ =Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) lowerCamelCase_ =processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) lowerCamelCase_ =transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowerCamelCase_ =image_transforms(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) lowerCamelCase_ =model(**SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ =outputs.logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) print("""First values of logits:""" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowerCamelCase_ =torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": lowerCamelCase_ =torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": lowerCamelCase_ =torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": lowerCamelCase_ =torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": lowerCamelCase_ =torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": lowerCamelCase_ =torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' ) 
model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print(f'Pushing model and processor of {model_name} to the hub...' ) model.push_to_hub(f'{model_name}' ) processor.push_to_hub(f'{model_name}' ) if __name__ == "__main__": __A : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) __A : List[str] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
715
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = { 'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimesformerModel', 'TimesformerForVideoClassification', 'TimesformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
0
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _SCREAMING_SNAKE_CASE ( __snake_case): def _snake_case ( self )-> Any: lowerCamelCase_ =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A_ , """tf_padding""" ) ) self.parent.assertTrue(hasattr(A_ , """depth_multiplier""" ) ) class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , )-> Optional[Any]: lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =depth_multiplier lowerCamelCase_ =min_depth lowerCamelCase_ =tf_padding lowerCamelCase_ =int(last_hidden_size * depth_multiplier ) lowerCamelCase_ =output_stride lowerCamelCase_ =hidden_act lowerCamelCase_ =classifier_dropout_prob lowerCamelCase_ =use_labels lowerCamelCase_ =is_training lowerCamelCase_ =num_labels lowerCamelCase_ 
=initializer_range lowerCamelCase_ =scope def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase_ =self.get_config() return config, pixel_values, labels, pixel_labels def _snake_case ( self )-> List[str]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =MobileNetVaModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ =model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =self.num_labels lowerCamelCase_ =MobileNetVaForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ =model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self )-> Any: lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase): _UpperCamelCase:List[Any] = (MobileNetVaModel, 
MobileNetVaForImageClassification) if is_torch_available() else () _UpperCamelCase:Optional[int] = ( {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification} if is_torch_available() else {} ) _UpperCamelCase:Optional[Any] = False _UpperCamelCase:List[str] = False _UpperCamelCase:int = False _UpperCamelCase:int = False def _snake_case ( self )-> Tuple: lowerCamelCase_ =MobileNetVaModelTester(self ) lowerCamelCase_ =MobileNetVaConfigTester(self , config_class=A_ , has_text_modality=A_ ) def _snake_case ( self )-> Any: self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" ) def _snake_case ( self )-> Dict: pass @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" ) def _snake_case ( self )-> Any: pass @unittest.skip(reason="""MobileNetV1 does not output attentions""" ) def _snake_case ( self )-> Optional[int]: pass def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(A_ ) lowerCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ =[*signature.parameters.keys()] lowerCamelCase_ =["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) def _snake_case ( self )-> int: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _snake_case ( self )-> Union[str, Any]: def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ =outputs.hidden_states lowerCamelCase_ =26 self.assertEqual(len(A_ ) , A_ ) lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: lowerCamelCase_ =True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ =True check_hidden_states_output(A_ , A_ , A_ ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def _snake_case ( self )-> Dict: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =MobileNetVaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def __UpperCamelCase ( ) ->Any: """simple docstring""" lowerCamelCase_ =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase): @cached_property def _snake_case ( self )-> str: return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None ) @slow def _snake_case ( self )-> List[str]: lowerCamelCase_ =MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(A_ ) lowerCamelCase_ =self.default_image_processor lowerCamelCase_ =prepare_img() lowerCamelCase_ =image_processor(images=A_ , return_tensors="""pt""" ).to(A_ ) # forward pass with torch.no_grad(): lowerCamelCase_ =model(**A_ ) # verify the logits lowerCamelCase_ =torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase_ =torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
716
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='%(message)s') def __UpperCamelCase ( _A : np.ndarray ) ->np.ndarray: """simple docstring""" return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.mean(1 ) # Centralize the data of class i lowerCamelCase_ =data - column_reshape(_A ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_A , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCamelCase_ =np.dot(_A , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =features.mean(1 ) lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.shape[1] lowerCamelCase_ =data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCamelCase_ =device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" # Check if the features have been loaded if features.any(): lowerCamelCase_ =features.mean(1 ) # Center the dataset lowerCamelCase_ =features - np.reshape(_A , (data_mean.size, 1) ) lowerCamelCase_ =np.dot(_A , centered_data.T ) / features.shape[1] lowerCamelCase_ , lowerCamelCase_ =np.linalg.eigh(_A ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCamelCase_ =eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCamelCase_ =np.dot(filtered_eigenvectors.T , _A ) logging.info("""Principal Component Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int , _A : int ) ->np.ndarray: """simple docstring""" assert classes > dimensions # Check if features have been already loaded if features.any: lowerCamelCase_ , lowerCamelCase_ =eigh( covariance_between_classes(_A , _A , _A ) , covariance_within_classes(_A , _A , _A ) , ) lowerCamelCase_ =eigenvectors[:, ::-1][:, :dimensions] lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =np.linalg.svd(_A ) lowerCamelCase_ =svd_matrix[:, 0:dimensions] lowerCamelCase_ =np.dot(filtered_svd_matrix.T , _A ) logging.info("""Linear Discriminant Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" # Create dummy dataset with 2 classes and 3 features lowerCamelCase_ =np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 
5, 6, 7]] ) lowerCamelCase_ =np.array([0, 0, 0, 1, 1] ) lowerCamelCase_ =2 lowerCamelCase_ =2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_A ) as error_info: lowerCamelCase_ =linear_discriminant_analysis( _A , _A , _A , _A ) if isinstance(_A , np.ndarray ): raise AssertionError( """Did not raise AssertionError for dimensions > classes""" ) assert error_info.type is AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" lowerCamelCase_ =np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCamelCase_ =2 lowerCamelCase_ =np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(_A ) as error_info: lowerCamelCase_ =principal_component_analysis(_A , _A ) if not np.allclose(_A , _A ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
75
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = { '''configuration_clap''': [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapAudioConfig''', '''ClapConfig''', '''ClapTextConfig''', ], '''processing_clap''': ['''ClapProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapModel''', '''ClapPreTrainedModel''', '''ClapTextModel''', '''ClapTextModelWithProjection''', '''ClapAudioModel''', '''ClapAudioModelWithProjection''', ] __A : int = ['''ClapFeatureExtractor'''] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
717
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __A : int = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: '))) print('Googling.....') __A : str = F"""https://www.google.com/search?q={query}&num=100""" __A : int = requests.get( url, headers={'User-Agent': str(UserAgent().random)}, ) try: __A : str = ( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'yuRUbf'}) .find('a') .get('href') ) except AttributeError: __A : Any = parse_qs( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'kCrYT'}) .find('a') .get('href') )['url'][0] webbrowser.open(link)
75
0
def __UpperCamelCase ( _A : Optional[int] , _A : List[Any] , _A : List[Any] ) ->int: """simple docstring""" lowerCamelCase_ =(num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def __UpperCamelCase ( ) ->Any: """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
718
from ..utils import DummyObject, requires_backends class _SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase__): _UpperCamelCase:List[Any] = ["torch", "torchsde"] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> List[Any]: requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def _snake_case ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Union[str, Any]: requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def _snake_case ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> str: requires_backends(cls , ["""torch""", """torchsde"""] )
75
0
# NOTE(review): this module has been machine-renamed and cannot run as-is:
# every local assignment targets the throwaway name `lowerCamelCase_`, every
# def is `__UpperCamelCase`, call sites reference `lowercase__`, and the
# second/third functions declare several parameters all named `_A`
# (a SyntaxError). The code below is left byte-identical; comments describe
# the apparent intent (an OpenAI-Jukebox -> HF Transformers checkpoint
# converter) — confirm against the upstream convert_jukebox.py before fixing.
import argparse
import json
import os
from pathlib import Path

import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging

logging.set_verbosity_info()
# Module logger (renamed from `logger` by the obfuscator).
__A : List[str] = logging.get_logger(__name__)
# Base URL for the public Jukebox weights (presumably `PREFIX` originally).
__A : Tuple = 'https://openaipublic.azureedge.net/jukebox/models/'
# Per-model list of checkpoint shards to download (presumably `MODEL_MAPPING`).
__A : Any = {
    'jukebox-1b-lyrics': [
        '5b/vqvae.pth.tar',
        '5b/prior_level_0.pth.tar',
        '5b/prior_level_1.pth.tar',
        '1b_lyrics/prior_level_2.pth.tar',
    ],
    'jukebox-5b-lyrics': [
        '5b/vqvae.pth.tar',
        '5b/prior_level_0.pth.tar',
        '5b/prior_level_1.pth.tar',
        '5b_lyrics/prior_level_2.pth.tar',
    ],
}


def __UpperCamelCase ( _A : Dict ) ->int:
    # Apparent intent: map a single OpenAI state-dict key to the HF naming
    # scheme (`replace_key(key)`); the body reads `key` but the parameter was
    # renamed to `_A`, so this raises NameError as written.
    """simple docstring"""
    if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
        lowerCamelCase_ =key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
    elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
        lowerCamelCase_ =key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
    elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
        lowerCamelCase_ =key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
    elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
        lowerCamelCase_ =key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
    if "conditioner_blocks.0." in key:
        lowerCamelCase_ =key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
    if "prime_prior" in key:
        lowerCamelCase_ =key.replace("""prime_prior""" , """encoder""" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        lowerCamelCase_ =key.replace(""".emb.""" , """.""" )
    if key.endswith("""k""" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(""".k""" , """.codebook""" )
    if "y_emb." in key:
        return key.replace("""y_emb.""" , """metadata_embedding.""" )
    if "x_emb.emb." in key:
        lowerCamelCase_ =key.replace("""0.x_emb.emb""" , """embed_tokens""" )
    if "prime_state_ln" in key:
        return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
    if ".ln" in key:
        return key.replace(""".ln""" , """.layer_norm""" )
    if "_ln" in key:
        return key.replace("""_ln""" , """_layer_norm""" )
    if "prime_state_proj" in key:
        return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
    if "prime_x_out" in key:
        return key.replace("""prime_x_out""" , """encoder.lm_head""" )
    if "prior.x_out" in key:
        return key.replace("""x_out""" , """fc_proj_out""" )
    if "x_emb" in key:
        return key.replace("""x_emb""" , """embed_tokens""" )
    return key


def __UpperCamelCase ( _A : Dict , _A : Optional[Any] , _A : Any , _A : List[Any] ) ->Union[str, Any]:
    # Apparent intent: `fix_jukebox_keys(state_dict, model_state_dict,
    # key_prefix, mapping)` — rewrite every key of an OpenAI state dict into
    # the HF layout using the regexes below, skipping keys whose shapes do not
    # match the target model. NOTE(review): the four parameters are all named
    # `_A` (duplicate-argument SyntaxError) and every assignment is collapsed
    # to `lowerCamelCase_` / call argument to `lowercase__`.
    """simple docstring"""
    lowerCamelCase_ ={}
    import re
    # Regexes that classify encoder / decoder / conditioner weight keys.
    lowerCamelCase_ =re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    lowerCamelCase_ =re.compile(
        R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    lowerCamelCase_ =re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    lowerCamelCase_ =re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    lowerCamelCase_ =re.compile(
        R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    lowerCamelCase_ =re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    lowerCamelCase_ =re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
    lowerCamelCase_ =re.compile(
        R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    lowerCamelCase_ =re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_encoder_block_conv_in.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
            lowerCamelCase_ =f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            lowerCamelCase_ =re_encoder_block_conv_in.sub(lowercase__ , lowercase__ )
        elif re_encoder_block_resnet.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_encoder_block_resnet.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
            lowerCamelCase_ ={"""1""": 1, """3""": 2}[groups[-2]]
            lowerCamelCase_ =f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            lowerCamelCase_ =f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            lowerCamelCase_ =prefix + resnet_block
            lowerCamelCase_ =re_encoder_block_resnet.sub(lowercase__ , lowercase__ )
        elif re_encoder_block_proj_out.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_encoder_block_proj_out.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            lowerCamelCase_ =re_encoder_block_proj_out.sub(lowercase__ , lowercase__ )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_decoder_block_conv_out.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
            lowerCamelCase_ =f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            lowerCamelCase_ =re_decoder_block_conv_out.sub(lowercase__ , lowercase__ )
        elif re_decoder_block_resnet.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_decoder_block_resnet.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
            lowerCamelCase_ ={"""1""": 1, """3""": 2}[groups[-2]]
            lowerCamelCase_ =f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            lowerCamelCase_ =f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            lowerCamelCase_ =prefix + resnet_block
            lowerCamelCase_ =re_decoder_block_resnet.sub(lowercase__ , lowercase__ )
        elif re_decoder_block_proj_in.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_decoder_block_proj_in.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            lowerCamelCase_ =re_decoder_block_proj_in.sub(lowercase__ , lowercase__ )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_prior_cond_conv_out.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
            lowerCamelCase_ =f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            lowerCamelCase_ =re_prior_cond_conv_out.sub(lowercase__ , lowercase__ )
        elif re_prior_cond_resnet.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_prior_cond_resnet.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
            lowerCamelCase_ ={"""1""": 1, """3""": 2}[groups[-2]]
            lowerCamelCase_ =f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            lowerCamelCase_ =f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            lowerCamelCase_ =prefix + resnet_block
            lowerCamelCase_ =re_prior_cond_resnet.sub(lowercase__ , lowercase__ )
        elif re_prior_cond_proj_in.fullmatch(lowercase__ ):
            lowerCamelCase_ =re_prior_cond_proj_in.match(lowercase__ )
            lowerCamelCase_ =regex_match.groups()
            lowerCamelCase_ =f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            lowerCamelCase_ =re_prior_cond_proj_in.sub(lowercase__ , lowercase__ )
        # keep original key
        else:
            lowerCamelCase_ =original_key
        lowerCamelCase_ =replace_key(lowercase__ )
        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match' )
        # handle missmatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            lowerCamelCase_ =model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
            lowerCamelCase_ =original_key
        lowerCamelCase_ =original_key
        lowerCamelCase_ =value
    return new_dict


@torch.no_grad()
def __UpperCamelCase ( _A : int=None , _A : List[str]=None ) ->List[Any]:
    # Apparent intent: `convert_openai_checkpoint(model_name,
    # pytorch_dump_folder_path)` — download the shards, remap each state dict
    # via the function above, load them into a JukeboxModel, and save the
    # converted model plus a key-mapping json. NOTE(review): both parameters
    # are named `_A` (SyntaxError) and the body references the real names.
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
            lowerCamelCase_ =requests.get(f'{PREFIX}{file}' , allow_redirects=lowercase__ )
            os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=lowercase__ )
            open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
    lowerCamelCase_ =MODEL_MAPPING[model_name.split("""/""" )[-1]]
    lowerCamelCase_ =JukeboxConfig.from_pretrained(lowercase__ )
    lowerCamelCase_ =JukeboxModel(lowercase__ )
    lowerCamelCase_ =[]
    lowerCamelCase_ ={}
    for i, dict_name in enumerate(lowercase__ ):
        lowerCamelCase_ =torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""]
        lowerCamelCase_ ={}
        for k in old_dic.keys():
            if k.endswith(""".b""" ):
                lowerCamelCase_ =old_dic[k]
            elif k.endswith(""".w""" ):
                lowerCamelCase_ =old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                lowerCamelCase_ =old_dic[k]
            else:
                lowerCamelCase_ =old_dic[k]
        # shard 0 is the VQ-VAE; shards 1..3 are priors, loaded in reverse.
        lowerCamelCase_ ="""vqvae""" if i == 0 else f'priors.{3 - i}'
        lowerCamelCase_ =fix_jukebox_keys(lowercase__ , model.state_dict() , lowercase__ , lowercase__ )
        weight_dict.append(lowercase__ )
    lowerCamelCase_ =weight_dict.pop(0 )
    model.vqvae.load_state_dict(lowercase__ )
    for i in range(len(lowercase__ ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
    with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
        json.dump(lowercase__ , lowercase__ )
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(lowercase__ )
    return weight_dict


if __name__ == "__main__":
    __A : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='jukebox-5b-lyrics',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='jukebox-5b-lyrics-converted',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    __A : Any = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
719
from collections import namedtuple import requests from lxml import html # type: ignore __A : Dict = namedtuple('covid_data', 'cases deaths recovered') def __UpperCamelCase ( _A : str = "https://www.worldometers.info/coronavirus/" ) ->covid_data: """simple docstring""" lowerCamelCase_ ="""//div[@class = \"maincounter-number\"]/span/text()""" return covid_data(*html.fromstring(requests.get(_A ).content ).xpath(_A ) ) __A : Union[str, Any] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}' print(fmt.format(*covid_stats()))
75
0
# NOTE(review): this module (apparently the Kandinsky 2.2 decoder
# DiffusionPipeline) has been machine-renamed and cannot run as-is: the base
# class `__lowercase` and every call argument `__a` are undefined, every
# local assignment targets `lowerCamelCase_`, and several signatures declare
# multiple parameters all named `_SCREAMING_SNAKE_CASE` (a SyntaxError).
# Code left byte-identical; comments describe the apparent intent — confirm
# against upstream diffusers pipeline_kandinsky2_2.py before fixing.
'''simple docstring'''
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

# Module logger (renamed from `logger`).
__A : Optional[Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Usage example injected into __call__'s docstring (renamed from
# `EXAMPLE_DOC_STRING`).
__A : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'


def __UpperCamelCase ( _A : int , _A : str , _A : int=8 ) ->List[str]:
    # Apparent intent: `downscale_height_and_width(height, width,
    # scale_factor)` — round each dimension up to the next multiple of
    # scale_factor**2, returning latent-space-compatible sizes.
    # NOTE(review): parameters are all `_A` (SyntaxError) and the body reads
    # `height`/`width`/`new_height`/`new_width`.
    """simple docstring"""
    lowerCamelCase_ =height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    lowerCamelCase_ =width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class _SCREAMING_SNAKE_CASE ( __lowercase):
    # Apparent intent: a DiffusionPipeline subclass wrapping a UNet, a DDPM
    # scheduler and a MoVQ VAE for text-free (image-embedding-conditioned)
    # generation.
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Optional[Any]:
        # Registers unet / scheduler / movq and caches the VAE downscale
        # factor. `__a` placeholders stand for the (renamed-away) parameters.
        super().__init__()
        self.register_modules(
            unet=__a , scheduler=__a , movq=__a , )
        lowerCamelCase_ =2 ** (len(self.movq.config.block_out_channels ) - 1)

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
        # Apparent intent: `prepare_latents(shape, dtype, device, generator,
        # latents, scheduler)` — sample fresh noise or validate user-supplied
        # latents, then scale by the scheduler's initial sigma.
        if latents is None:
            lowerCamelCase_ =randn_tensor(__a , generator=__a , device=__a , dtype=__a )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            lowerCamelCase_ =latents.to(__a )
        lowerCamelCase_ =latents * scheduler.init_noise_sigma
        return latents

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=0 )-> List[str]:
        # Apparent intent: enable_sequential_cpu_offload(gpu_id) via
        # accelerate's `cpu_offload`.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        lowerCamelCase_ =torch.device(f'cuda:{gpu_id}' )
        lowerCamelCase_ =[
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__a , __a )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=0 )-> List[str]:
        # Apparent intent: enable_model_cpu_offload(gpu_id) via
        # `cpu_offload_with_hook` (accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        lowerCamelCase_ =torch.device(f'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=__a )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        lowerCamelCase_ =None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowerCamelCase_ =cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
        # We'll offload the last model manually.
        lowerCamelCase_ =hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _snake_case ( self )-> Union[str, Any]:
        # Resolve the device the pipeline actually executes on, accounting
        # for accelerate offload hooks.
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__a , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(__a )
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , )-> Optional[int]:
        # Apparent intent: the denoising loop — prepare image embeddings
        # (with classifier-free guidance duplication), iterate the scheduler
        # timesteps through the UNet, then decode latents with the MoVQ VAE.
        # NOTE(review): parameters again all share one name (SyntaxError).
        lowerCamelCase_ =self._execution_device
        lowerCamelCase_ =guidance_scale > 1.0
        if isinstance(__a , __a ):
            lowerCamelCase_ =torch.cat(__a , dim=0 )
        lowerCamelCase_ =image_embeds.shape[0] * num_images_per_prompt
        if isinstance(__a , __a ):
            lowerCamelCase_ =torch.cat(__a , dim=0 )
        if do_classifier_free_guidance:
            lowerCamelCase_ =image_embeds.repeat_interleave(__a , dim=0 )
            lowerCamelCase_ =negative_image_embeds.repeat_interleave(__a , dim=0 )
            lowerCamelCase_ =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__a )
        self.scheduler.set_timesteps(__a , device=__a )
        lowerCamelCase_ =self.scheduler.timesteps
        lowerCamelCase_ =self.unet.config.in_channels
        lowerCamelCase_ =downscale_height_and_width(__a , __a , self.movq_scale_factor )
        # create initial latent
        lowerCamelCase_ =self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __a , __a , __a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(__a ) ):
            # expand the latents if we are doing classifier free guidance
            lowerCamelCase_ =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowerCamelCase_ ={"""image_embeds""": image_embeds}
            lowerCamelCase_ =self.unet(
                sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
            if do_classifier_free_guidance:
                lowerCamelCase_ =noise_pred.split(latents.shape[1] , dim=1 )
                lowerCamelCase_ =noise_pred.chunk(2 )
                lowerCamelCase_ =variance_pred.chunk(2 )
                lowerCamelCase_ =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowerCamelCase_ =torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowerCamelCase_ =noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowerCamelCase_ =self.scheduler.step(
                __a , __a , __a , generator=__a , )[0]
        # post-processing
        lowerCamelCase_ =self.movq.decode(__a , force_not_quantize=__a )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            lowerCamelCase_ =image * 0.5 + 0.5
            lowerCamelCase_ =image.clamp(0 , 1 )
            lowerCamelCase_ =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowerCamelCase_ =self.numpy_to_pil(__a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__a )
720
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
75
0
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __A : Dict = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , )-> str: lowerCamelCase_ =size if size is not None else {"""height""": 20, """width""": 20} lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =min_resolution lowerCamelCase_ =max_resolution lowerCamelCase_ =size lowerCamelCase_ =do_normalize lowerCamelCase_ =do_convert_rgb lowerCamelCase_ =[512, 1024, 2048, 4096] lowerCamelCase_ =patch_size if patch_size is not None else {"""height""": 16, """width""": 16} def _snake_case ( self )-> Any: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _snake_case ( self )-> List[Any]: lowerCamelCase_ ="""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg""" lowerCamelCase_ =Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ).convert("""RGB""" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Optional[Any] = PixaStructImageProcessor if is_vision_available() else None def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =PixaStructImageProcessingTester(self ) @property def _snake_case ( self )-> Dict: return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self )-> Any: lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , """do_convert_rgb""" ) ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.image_processor_tester.prepare_dummy_image() lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) lowerCamelCase_ =2048 lowerCamelCase_ =image_processor(UpperCAmelCase__ , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) ) def _snake_case ( self )-> Any: # Initialize image_processor lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCamelCase_ =image_processor( UpperCAmelCase__ , 
return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _snake_case ( self )-> Any: # Initialize image_processor lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 lowerCamelCase_ =True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(UpperCAmelCase__ ): lowerCamelCase_ =image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches lowerCamelCase_ ="""Hello""" lowerCamelCase_ =image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCamelCase_ =image_processor( UpperCAmelCase__ , return_tensors="""pt""" , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _snake_case ( self )-> Optional[int]: # Initialize image_processor lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) lowerCamelCase_ =( 
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCamelCase_ =image_processor( UpperCAmelCase__ , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _snake_case ( self )-> str: # Initialize image_processor lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCamelCase_ =image_processor( UpperCAmelCase__ , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Optional[Any] = PixaStructImageProcessor if is_vision_available() else None def _snake_case ( self )-> Dict: lowerCamelCase_ =PixaStructImageProcessingTester(self , num_channels=4 ) lowerCamelCase_ =3 @property def _snake_case ( self )-> Dict: return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self )-> Any: lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , """do_convert_rgb""" ) ) def _snake_case ( self )-> List[Any]: # Initialize image_processor lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCamelCase_ =image_processor( UpperCAmelCase__ , return_tensors="""pt""" , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
721
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record __A : Optional[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n' __A : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n' __A : str = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. 
Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': 
{\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def __UpperCamelCase ( _A : List[Any] , _A : Union[str, Any] ) ->Dict: """simple docstring""" return float((preds == labels).mean() ) def __UpperCamelCase ( _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any]="binary" ) ->List[Any]: """simple docstring""" lowerCamelCase_ =simple_accuracy(_A , _A ) lowerCamelCase_ =float(fa_score(y_true=_A , y_pred=_A , average=_A ) ) return { "accuracy": acc, "f1": fa, } def __UpperCamelCase ( _A : int , _A : Union[str, Any] ) ->int: """simple docstring""" lowerCamelCase_ ={} for id_pred, label in zip(_A , _A ): lowerCamelCase_ =f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}' lowerCamelCase_ =id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCamelCase_ =[(pred, label)] lowerCamelCase_ , lowerCamelCase_ =[], [] for question, preds_labels in question_map.items(): lowerCamelCase_ , lowerCamelCase_ =zip(*_A ) lowerCamelCase_ =fa_score(y_true=_A , y_pred=_A , average="""macro""" ) fas.append(_A ) lowerCamelCase_ =int(sum(pred == label for pred, label in preds_labels ) == len(_A ) ) ems.append(_A ) lowerCamelCase_ =float(sum(_A ) / len(_A ) ) lowerCamelCase_ =sum(_A ) / len(_A ) lowerCamelCase_ =float(fa_score(y_true=_A , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , 
_KWARGS_DESCRIPTION) class _SCREAMING_SNAKE_CASE ( datasets.Metric): def _snake_case ( self )-> Union[str, Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , ) def _snake_case ( self )-> Optional[Any]: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} elif self.config_name == "cb": return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="""macro""" ) elif self.config_name == "record": lowerCamelCase_ =[ { """qas""": [ {"""id""": 
ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowerCamelCase_ ={pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0] elif self.config_name == "multirc": return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
75
0
from __future__ import annotations __A : Union[str, Any] = 'Muhammad Umer Farooq' __A : int = 'MIT' __A : Tuple = '1.0.0' __A : int = 'Muhammad Umer Farooq' __A : List[Any] = 'contact@muhammadumerfarooq.me' __A : Optional[Any] = 'Alpha' import re from html.parser import HTMLParser from urllib import parse import requests class _SCREAMING_SNAKE_CASE ( __a): def __init__( self , _SCREAMING_SNAKE_CASE )-> None: super().__init__() lowerCamelCase_ =[] lowerCamelCase_ =domain def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None: # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: lowerCamelCase_ =parse.urljoin(self.domain , A__ ) self.urls.append(A__ ) def __UpperCamelCase ( _A : str ) ->str: """simple docstring""" return ".".join(get_sub_domain_name(_A ).split(""".""" )[-2:] ) def __UpperCamelCase ( _A : str ) ->str: """simple docstring""" return parse.urlparse(_A ).netloc def __UpperCamelCase ( _A : str = "https://github.com" ) ->list[str]: """simple docstring""" lowerCamelCase_ =get_domain_name(_A ) # Initialize the parser lowerCamelCase_ =Parser(_A ) try: # Open URL lowerCamelCase_ =requests.get(_A ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through lowerCamelCase_ =set() for link in parser.urls: # open URL. # read = requests.get(link) try: lowerCamelCase_ =requests.get(_A ) # Get the valid email. lowerCamelCase_ =re.findall("""[a-zA-Z0-9]+@""" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(_A ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. 
return sorted(_A ) if __name__ == "__main__": __A : Optional[int] = emails_from_url('https://github.com') print(F"""{len(emails)} emails found:""") print('\n'.join(sorted(emails)))
700
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A : Union[str, Any] = logging.get_logger(__name__) __A : Optional[Any] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[str, Any] = "deta" _UpperCamelCase:int = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=900 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.2_5 , **_SCREAMING_SNAKE_CASE , )-> str: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) lowerCamelCase_ =CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =backbone_config.pop("""model_type""" ) lowerCamelCase_ =CONFIG_MAPPING[backbone_model_type] lowerCamelCase_ =config_class.from_dict(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =backbone_config lowerCamelCase_ =num_queries lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =init_xavier_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =auxiliary_loss lowerCamelCase_ =position_embedding_type # deformable attributes lowerCamelCase_ =num_feature_levels lowerCamelCase_ =encoder_n_points lowerCamelCase_ =decoder_n_points lowerCamelCase_ =two_stage lowerCamelCase_ =two_stage_num_proposals lowerCamelCase_ =with_box_refine lowerCamelCase_ =assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher lowerCamelCase_ =class_cost lowerCamelCase_ =bbox_cost lowerCamelCase_ =giou_cost # Loss coefficients lowerCamelCase_ =mask_loss_coefficient lowerCamelCase_ =dice_loss_coefficient lowerCamelCase_ =bbox_loss_coefficient lowerCamelCase_ =giou_loss_coefficient lowerCamelCase_ =eos_coefficient lowerCamelCase_ =focal_alpha super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def _snake_case ( self )-> int: return self.encoder_attention_heads @property def _snake_case ( self )-> int: return 
self.d_model def _snake_case ( self )-> str: lowerCamelCase_ =copy.deepcopy(self.__dict__ ) lowerCamelCase_ =self.backbone_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
75
0
"""Convert between energy units using joules as the pivot unit.

BUG FIXES: the original raised via an undefined name ``_SCREAMING_SNAKE_CASE``
(NameError instead of the intended ValueError) and declared the function with
three parameters all named ``_A`` (a SyntaxError). Restored named parameters
and a properly built error message.
"""

# Conversion factors to joules for each supported unit.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert *value* from *from_type* to *to_type*.

    >>> energy_conversion("joule", "kilojoule", 1000)
    1.0

    Raises:
        ValueError: if either unit name is not in ENERGY_CONVERSION.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then from joules to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
"""ALBERT model configuration.

BUG FIXES: the original declared the constructor with twenty parameters all
named ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), and named both classes
``_SCREAMING_SNAKE_CASE`` so the ONNX config clobbered the model config.
Parameter names are restored from the body's attribute assignments; the
defaults (30000, 128, 4096, ...) match them one-to-one.
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration for an ALBERT model; defaults mirror albert-xxlarge."""

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are handled by the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks add a 'choice' axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
75
0
"""Check whether two strings are anagrams of each other.

BUG FIXES: the original referenced an undefined name ``lowercase__`` for every
length/loop/``defaultdict`` call (NameError), and the function was named
``__UpperCamelCase`` while the main guard called ``check_anagrams``.
"""
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the strings are anagrams, ignoring case and whitespace.

    Counts each character up for the first string and down for the second;
    the strings are anagrams iff every count ends at zero.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings, increment/decrement the count.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
702
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """Weighted directed graph stored as an adjacency map.

    ``self.graph`` maps each vertex ``u`` to a list of ``[weight, v]``
    pairs, one per outgoing edge ``u -> v``.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the edge ``u -> v`` with weight ``w`` (no duplicates)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # Make sure the destination vertex exists even if it has no edges.
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return all vertices of the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge ``u -> v`` if it exists."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first vertex).

        Returns the visit order; stops early and returns as soon as the
        destination ``d`` is reached (``d=-1`` means no destination).
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (random if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s``; returns the visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological order of vertices reachable from ``s``."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # A vertex is emitted once all of its children are visited.
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the vertices that participate in some cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # A visited vertex that is not the direct parent but is an
                    # indirect ancestor closes a cycle; collect the stack
                    # segment back to it.
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if the graph contains a cycle (same traversal as
        ``cycle_nodes`` but exits at the first back-edge found)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Weighted undirected graph; every edge is stored in both directions."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge ``u -- v`` with weight ``w``."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the undirected edge ``u -- v`` (both directions)."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS; same contract as ``DirectedGraph.dfs``."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (random if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s``; returns the visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices that participate in some cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if the graph contains a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return all vertices of the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
75
0
def __UpperCamelCase ( _A : Union[str, Any] ) ->Optional[Any]: """simple docstring""" if len(lowerCAmelCase_ ) < 2: return collection def circle_sort_util(_A : Any , _A : Any , _A : str ) -> bool: lowerCamelCase_ =False if low == high: return swapped lowerCamelCase_ =low lowerCamelCase_ =high while left < right: if collection[left] > collection[right]: lowerCamelCase_ =( collection[right], collection[left], ) lowerCamelCase_ =True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCamelCase_ =( collection[right + 1], collection[left], ) lowerCamelCase_ =True lowerCamelCase_ =low + int((high - low) / 2 ) lowerCamelCase_ =circle_sort_util(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) lowerCamelCase_ =circle_sort_util(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) return swapped or left_swap or right_swap lowerCamelCase_ =True while is_not_sorted is True: lowerCamelCase_ =circle_sort_util(lowerCAmelCase_ , 0 , len(lowerCAmelCase_ ) - 1 ) return collection if __name__ == "__main__": __A : Optional[Any] = input('Enter numbers separated by a comma:\n').strip() __A : Optional[int] = [int(item) for item in user_input.split(',')] print(circle_sort(unsorted))
703
import os from datetime import datetime as dt from github import Github __A : Optional[int] = [ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def __UpperCamelCase ( ) ->Dict: """simple docstring""" lowerCamelCase_ =Github(os.environ["""GITHUB_TOKEN"""] ) lowerCamelCase_ =g.get_repo("""huggingface/diffusers""" ) lowerCamelCase_ =repo.get_issues(state="""open""" ) for issue in open_issues: lowerCamelCase_ =sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A ) lowerCamelCase_ =comments[0] if len(_A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
75
0
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
pytestmark = pytest.mark.integration

# Metrics with heavyweight optional dependencies; their tests are skipped
# when the dependency is not installed (or on Windows, where unsupported).
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    """Decorator: skip the test when the metric needs fairseq and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    """Decorator: skip the test when the metric needs transformers and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    """Decorator: skip the test on Windows for metrics known not to work there."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    """Collect the metric names from the local ``./metrics/*/`` folders."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    """Runs each local metric's module doctests with heavy calls patched out."""

    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        # NOTE(review): the original assigns the literal "[...]" to an unused
        # local here; it looks like a leftover placeholder — TODO confirm
        # against upstream before removing.
        placeholder = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        # NOTE(review): same "[...]" placeholder as above — TODO confirm.
        placeholder = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest without any patching (real, slow calls)
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        """Apply the registered patcher for ``metric_name``, if any."""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        """Redirect ``datasets.load_metric`` to the local ``metrics/`` folder."""

        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        """Class decorator factory: register a context-manager patcher for a metric."""

        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    """Replace the BLEURT model forward pass with a fixed-score mock."""
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    """Replace the BERTScore model download and forward pass with mocks."""
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    """Replace the COMET model download and prediction with mocks."""

    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    """The seqeval metric must reject an unknown tagging scheme."""
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
704
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` (empty string if blank)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given indentation level.

    If `start_prompt`/`end_prompt` are given, everything before/after them
    is kept as a single untouched block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at the target indent closes the current block; if the
            # previous line was deeper-indented, this line still belongs to it.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so the sort ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort imported object names: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the names inside brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of `file`; return True if it would change.

    With `check_only=False` the file is rewritten in place instead.
    """
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
75
0
# NOTE(review): extraction artifact — identifiers were mangled into placeholder
# names (`__A`, `_SCREAMING_SNAKE_CASE`, `_UpperCamelCase`, `lowerCamelCase_`,
# `__snake_case`), so names collide or are undefined as written (the base class
# `__SCREAMING_SNAKE_CASE` is never defined; the annotation types `Tuple`/`List`/
# `Optional` are never imported; every `__init__` parameter shares one name,
# which is a SyntaxError; assignments bind `lowerCamelCase_` while readable
# names like `dynamic_axis` are read but never bound). Tokens are left
# untouched; comments document the evident intent.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# Module-level logger (standard transformers convention).
__A : Tuple = logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config.json URL.
__A : str = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}


class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE):
    # Configuration for a data2vec-text model. Carries the usual BERT-family
    # hyper-parameter set; presumably subclasses `PretrainedConfig` — TODO confirm.
    _UpperCamelCase:List[str] = "data2vec-text"  # model_type identifier

    def __init__( self , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , )-> Optional[int]:
        # Defaults match BERT-base: vocab 30522, hidden 768, 12 layers / 12 heads,
        # intermediate 3072, max positions 512, layer-norm eps 1e-12.
        super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
        # Record each hyper-parameter on the instance.
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =position_embedding_type
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =classifier_dropout


class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE):
    # ONNX export configuration: declares the dynamic axes of the exported
    # model's inputs. Presumably subclasses `OnnxConfig` — TODO confirm.
    @property
    def _snake_case ( self )-> Tuple:
        # Multiple-choice tasks carry an extra `choice` axis between batch and
        # sequence; every other task uses plain (batch, sequence) inputs.
        if self.task == "multiple-choice":
            lowerCamelCase_ ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            lowerCamelCase_ ={0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
705
# NOTE(review): extraction artifact — identifiers were mangled (`__A`,
# `_SCREAMING_SNAKE_CASE`, `_snake_case`, `lowerCamelCase_`), so every method
# shares the name `_snake_case`, every parameter shares one name (a
# SyntaxError where repeated), and assignments bind `lowerCamelCase_` while
# readable names (`vocab`, `state`, `pieces`, `sep`, `cls`, `out_vocab_file`,
# constants like `VOCAB_FILES_NAMES`) are read but never bound under those
# names. Tokens left untouched; comments document the evident intent
# (a SentencePiece-backed RemBERT slow tokenizer).
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

__A : Tuple = logging.get_logger(__name__)

# Expected vocab artifact name inside a saved tokenizer directory.
__A : str = {'vocab_file': 'sentencepiece.model'}

# Checkpoint name -> hosted sentencepiece model URL.
__A : Optional[Any] = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

# Checkpoint name -> max model input length.
__A : int = {
    'google/rembert': 2_56,
}


class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
    # RemBERT tokenizer backed by a SentencePiece model; presumably subclasses
    # `PreTrainedTokenizer` — TODO confirm.
    _UpperCamelCase:List[Any] = VOCAB_FILES_NAMES
    _UpperCamelCase:Any = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase:Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , )-> str:
        # Forward tokenizer options/special tokens to the base class, then load
        # the SentencePiece model from `vocab_file`.
        super().__init__(
            do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        lowerCamelCase_ =do_lower_case
        lowerCamelCase_ =remove_space
        lowerCamelCase_ =keep_accents
        lowerCamelCase_ =vocab_file
        lowerCamelCase_ =spm.SentencePieceProcessor()
        self.sp_model.Load(_SCREAMING_SNAKE_CASE )

    @property
    def _snake_case ( self )-> Dict:
        # vocab_size: number of pieces in the SentencePiece model.
        return len(self.sp_model )

    def _snake_case ( self )-> Optional[int]:
        # get_vocab: token -> id mapping, including added tokens.
        lowerCamelCase_ ={self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self )-> Optional[Any]:
        # Drop the (unpicklable) SentencePieceProcessor before pickling.
        lowerCamelCase_ =self.__dict__.copy()
        lowerCamelCase_ =None
        return state

    def __setstate__( self , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
        # Restore state and reload the SentencePiece model from vocab_file.
        lowerCamelCase_ =d
        lowerCamelCase_ =spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Union[str, Any]:
        # _tokenize: raw text -> list of sentencepiece pieces.
        lowerCamelCase_ =self.sp_model.EncodeAsPieces(_SCREAMING_SNAKE_CASE )
        return pieces

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
        # token (piece) -> integer id.
        return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
        # integer id -> token (piece).
        return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
        # pieces -> detokenized string.
        lowerCamelCase_ =self.sp_model.decode_pieces(_SCREAMING_SNAKE_CASE )
        return out_string

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
        # build_inputs_with_special_tokens:
        # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP].
        lowerCamelCase_ =[self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )-> List[int]:
        # get_special_tokens_mask: 1 marks a special token, 0 a sequence token.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
        return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
        # create_token_type_ids_from_sequences: segment 0 covers [CLS] A [SEP],
        # segment 1 covers B [SEP] when a second sequence is given.
        lowerCamelCase_ =[self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model into `save_directory`
        # (no-op copy when source and destination are the same file).
        if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
            return
        lowerCamelCase_ =os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
            copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
75
0
from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
706
# NOTE(review): extraction artifact — identifiers were mangled
# (`_SCREAMING_SNAKE_CASE`, `_snake_case`, `lowerCamelCase_`), so all test
# methods collide on one name and assignments bind `lowerCamelCase_` while the
# assertions read names (`torch_builtin`, `geluaa`, `y_gelu`, `clipped_mask`,
# `acta`) never bound under those names. Tokens left untouched; comments
# document the evident intent (unit tests for transformers activation lookup).
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    def _snake_case ( self )-> List[str]:
        # gelu_python must match torch's built-in gelu and differ from gelu_new.
        lowerCamelCase_ =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        lowerCamelCase_ =get_activation("""gelu""" )
        self.assertTrue(torch.allclose(gelu_python(_SCREAMING_SNAKE_CASE ) , torch_builtin(_SCREAMING_SNAKE_CASE ) ) )
        self.assertFalse(torch.allclose(gelu_python(_SCREAMING_SNAKE_CASE ) , gelu_new(_SCREAMING_SNAKE_CASE ) ) )

    def _snake_case ( self )-> int:
        # gelu_10 clips its output at 10.0 but must agree with plain gelu
        # everywhere below the clip threshold.
        lowerCamelCase_ =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        lowerCamelCase_ =get_activation("""gelu""" )
        lowerCamelCase_ =get_activation("""gelu_10""" )
        lowerCamelCase_ =torch_builtin(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =geluaa(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(_SCREAMING_SNAKE_CASE ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )

    def _snake_case ( self )-> Dict:
        # Every registered activation name must resolve without error; unknown
        # names and non-string keys must raise.
        get_activation("""gelu""" )
        get_activation("""gelu_10""" )
        get_activation("""gelu_fast""" )
        get_activation("""gelu_new""" )
        get_activation("""gelu_python""" )
        get_activation("""gelu_pytorch_tanh""" )
        get_activation("""linear""" )
        get_activation("""mish""" )
        get_activation("""quick_gelu""" )
        get_activation("""relu""" )
        get_activation("""sigmoid""" )
        get_activation("""silu""" )
        get_activation("""swish""" )
        get_activation("""tanh""" )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            get_activation("""bogus""" )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            get_activation(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Any:
        # get_activation returns distinct instances: an attribute set on one
        # must not appear on a freshly fetched instance.
        lowerCamelCase_ =get_activation("""gelu""" )
        lowerCamelCase_ =1
        lowerCamelCase_ =get_activation("""gelu""" )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =acta.a
75
0
# NOTE(review): extraction artifact — identifiers were mangled
# (`_SCREAMING_SNAKE_CASE`, `_snake_case`, `lowerCamelCase_`, `__a`,
# `_UpperCamelCase`), so classes/methods collide on single names, assignments
# bind `lowerCamelCase_` while later code reads the intended names
# (`config`, `model`, `result`, `inputs_dict`, tester attributes), and call
# arguments were collapsed to `__a`. Tokens left untouched; comments document
# the evident intent (ConvNeXt-V2 model test-suite for transformers).
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class _SCREAMING_SNAKE_CASE :
    # Model tester: builds small ConvNeXt-V2 configs/inputs and runs shape checks.
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE=[2, 3, 4] , _SCREAMING_SNAKE_CASE=None , )-> Dict:
        # Store the miniature architecture hyper-parameters used by the tests.
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =num_stages
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =depths
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =num_labels
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =out_features
        lowerCamelCase_ =out_indices
        lowerCamelCase_ =scope

    def _snake_case ( self )-> Any:
        # prepare_config_and_inputs: random pixel tensor (+ labels when enabled).
        lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ =None
        if self.use_labels:
            lowerCamelCase_ =ids_tensor([self.batch_size] , self.num_labels )
        lowerCamelCase_ =self.get_config()
        return config, pixel_values, labels

    def _snake_case ( self )-> int:
        # get_config: build the small test configuration.
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
        # Base model: last hidden state is (B, C_last, H//32, W//32).
        lowerCamelCase_ =ConvNextVaModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ =model(__a )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
        # Classification head: logits are (B, num_labels).
        lowerCamelCase_ =ConvNextVaForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ =model(__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
        # Backbone: feature maps/channels must track `out_features`, and the
        # backbone must also work with out_features=None (last stage only).
        lowerCamelCase_ =ConvNextVaBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ =model(__a )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowerCamelCase_ =None
        lowerCamelCase_ =ConvNextVaBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ =model(__a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def _snake_case ( self )-> Optional[Any]:
        # prepare_config_and_inputs_for_common (no labels).
        lowerCamelCase_ =self.prepare_config_and_inputs()
        lowerCamelCase_ =config_and_inputs
        lowerCamelCase_ ={"pixel_values": pixel_values}
        return config, inputs_dict

    def _snake_case ( self )-> List[Any]:
        # prepare_config_and_inputs_with_labels.
        lowerCamelCase_ =self.prepare_config_and_inputs()
        lowerCamelCase_ =config_and_inputs
        lowerCamelCase_ ={"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class _SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    # Main test-suite class; presumably mixes in ModelTesterMixin and
    # PipelineTesterMixin — TODO confirm (base names were mangled).
    _UpperCamelCase:Tuple = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    _UpperCamelCase:List[Any] = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common-test mixin (all disabled here).
    _UpperCamelCase:Dict = False
    _UpperCamelCase:List[Any] = False
    _UpperCamelCase:Dict = False
    _UpperCamelCase:Union[str, Any] = False
    _UpperCamelCase:Union[str, Any] = False

    def _snake_case ( self )-> List[Any]:
        # setUp: build the model tester and a ConfigTester.
        lowerCamelCase_ =ConvNextVaModelTester(self )
        lowerCamelCase_ =ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )

    def _snake_case ( self )-> int:
        # Run the standard configuration round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _snake_case ( self )-> str:
        # create_and_test_config_common_properties: intentionally a no-op here.
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def _snake_case ( self )-> Tuple:
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def _snake_case ( self )-> Tuple:
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def _snake_case ( self )-> Tuple:
        pass

    def _snake_case ( self )-> Tuple:
        # Training smoke test: forward + backward on every trainable class.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCamelCase_ =True
            if model_class.__name__ in [
                *get_values(__a ),
                *get_values(__a ),
            ]:
                continue
            lowerCamelCase_ =model_class(__a )
            model.to(__a )
            model.train()
            lowerCamelCase_ =self._prepare_for_class(__a , __a , return_labels=__a )
            lowerCamelCase_ =model(**__a ).loss
            loss.backward()

    def _snake_case ( self )-> Optional[int]:
        # Same as above but with gradient checkpointing enabled.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCamelCase_ =False
            lowerCamelCase_ =True
            if (
                model_class.__name__ in [*get_values(__a ), *get_values(__a )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            lowerCamelCase_ =model_class(__a )
            model.to(__a )
            model.gradient_checkpointing_enable()
            model.train()
            lowerCamelCase_ =self._prepare_for_class(__a , __a , return_labels=__a )
            lowerCamelCase_ =model(**__a ).loss
            loss.backward()

    def _snake_case ( self )-> List[Any]:
        # Forward signature: first argument must be `pixel_values`.
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(__a )
            lowerCamelCase_ =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ =[*signature.parameters.keys()]
            lowerCamelCase_ =["pixel_values"]
            self.assertListEqual(arg_names[:1] , __a )

    def _snake_case ( self )-> int:
        # Base model shape check.
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def _snake_case ( self )-> Tuple:
        # Hidden-state outputs: one per stage plus the stem, at H//4 x W//4.
        def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ =model(**self._prepare_for_class(__a , __a ) )
            lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ =self.model_tester.num_stages
            self.assertEqual(len(__a ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ =True
            check_hidden_states_output(__a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ =True
            check_hidden_states_output(__a , __a , __a )

    def _snake_case ( self )-> Optional[Any]:
        # Classification head shape check.
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )

    @slow
    def _snake_case ( self )-> Any:
        # Smoke-test loading the first published checkpoint.
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =ConvNextVaModel.from_pretrained(__a )
            self.assertIsNotNone(__a )


def __UpperCamelCase ( ) ->Dict:
    """Load the standard COCO test fixture image used by the integration test."""
    lowerCamelCase_ =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    # Integration test: run the published tiny checkpoint on a real image and
    # compare the first logits against recorded reference values.
    @cached_property
    def _snake_case ( self )-> List[str]:
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None

    @slow
    def _snake_case ( self )-> Union[str, Any]:
        lowerCamelCase_ =ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__a )
        lowerCamelCase_ =self.default_image_processor
        lowerCamelCase_ =prepare_img()
        lowerCamelCase_ =preprocessor(images=__a , return_tensors="""pt""" ).to(__a )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ =model(**__a )
        # verify the logits
        lowerCamelCase_ =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __a )
        lowerCamelCase_ =torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
707
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): extraction artifact — function/variable identifiers were
# mangled (`__UpperCamelCase`, `_A`, `lowerCamelCase_`), so all four functions
# collide on one name and assignments bind `lowerCamelCase_` while later code
# reads the intended names (`compute_environment`, `config`, `parser`, `args`,
# `config_file`). Tokens left untouched; comments/docstrings document the
# evident intent (the `accelerate config` CLI subcommand).
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input

# CLI help text for the `config` subcommand.
__A : List[str] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def __UpperCamelCase ( ) ->List[str]:
    """Interactively prompt the user and return the resulting config object.

    Dispatches to the SageMaker questionnaire when the user selects AWS,
    otherwise to the local-cluster questionnaire.
    """
    lowerCamelCase_ =_ask_options(
        """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        lowerCamelCase_ =get_sagemaker_input()
    else:
        lowerCamelCase_ =get_cluster_input()
    return config


def __UpperCamelCase ( _A : List[str]=None ) ->str:
    """Build (or attach to *subparsers*) the argparse parser for `config`."""
    if subparsers is not None:
        lowerCamelCase_ =subparsers.add_parser("""config""" , description=_A )
    else:
        lowerCamelCase_ =argparse.ArgumentParser("""Accelerate config command""" , description=_A )
    parser.add_argument(
        """--config_file""" , default=_A , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=_A )
    return parser


def __UpperCamelCase ( _A : Union[str, Any] ) ->Optional[int]:
    """Run the questionnaire and write the answers to disk.

    Writes to ``args.config_file`` when given, otherwise to the default cache
    location (creating it first); JSON vs YAML is picked from the extension.
    """
    lowerCamelCase_ =get_user_input()
    if args.config_file is not None:
        lowerCamelCase_ =args.config_file
    else:
        if not os.path.isdir(_A ):
            os.makedirs(_A )
        lowerCamelCase_ =default_yaml_config_file
    if config_file.endswith(""".json""" ):
        config.to_json_file(_A )
    else:
        config.to_yaml_file(_A )
    print(f'accelerate configuration saved at {config_file}' )


def __UpperCamelCase ( ) ->Dict:
    """Standalone entry point: parse CLI args and run the config command."""
    lowerCamelCase_ =config_command_parser()
    lowerCamelCase_ =parser.parse_args()
    config_command(_A )


if __name__ == "__main__":
    main()
75
0
"""Tests for the `datasets.parallel` joblib-spark backend integration."""
# NOTE(review): extraction artifact — identifiers were mangled
# (`__UpperCamelCase`, `_A`, `lowerCamelCase_`, `__UpperCamelCase` also used in
# place of the expected exception type and of `map_nested` arguments), so the
# functions collide on one name and several reads have no matching binding
# (e.g. `return i + 1` with parameter `_A`). Tokens left untouched; comments
# document the evident intent.
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def __UpperCamelCase ( _A : Tuple ) ->Optional[int]:  # picklable for multiprocessing
    """Top-level increment helper; must be module-level so it can be pickled."""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCamelCase ( ) ->Any:
    """The 'spark' backend must register; unknown backends must raise,
    regardless of the num_proc value passed to map_nested."""
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lowerCamelCase_ =[1, 2, 3]
    with pytest.raises(__UpperCamelCase ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=2 )
    with pytest.raises(__UpperCamelCase ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def __UpperCamelCase ( _A : Union[str, Any] ) ->List[Any]:
    """map_nested under the spark backend must produce the same results as the
    sequential path for lists, dicts, nested lists, and nested dicts."""
    lowerCamelCase_ =[1, 2]
    lowerCamelCase_ ={"""a""": 1, """b""": 2}
    lowerCamelCase_ ={"""a""": [1, 2], """b""": [3, 4]}
    lowerCamelCase_ ={"""a""": {"""1""": 1}, """b""": 2}
    lowerCamelCase_ ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    # Expected outputs after applying the +1 helper element-wise.
    lowerCamelCase_ =[2, 3]
    lowerCamelCase_ ={"""a""": 2, """b""": 3}
    lowerCamelCase_ ={"""a""": [2, 3], """b""": [4, 5]}
    lowerCamelCase_ ={"""a""": {"""1""": 2}, """b""": 3}
    lowerCamelCase_ ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
        assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
        assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
        assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
        assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
708
def encrypt(input_string: str, key: int) -> str:
    """Encrypt *input_string* with the rail-fence (zigzag) cipher of height *key*.

    Characters are written in a zigzag across ``key`` rails and read off
    rail by rail.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    # Fix: the original bound every value to one placeholder name
    # (`lowerCamelCase_`) while later statements read `temp_grid`, `lowest`,
    # `grid`, `output_string` — all undefined. Names restored to match the reads.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1  # index of the bottom rail
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        # A single rail (or a string shorter than the grid) is unchanged.
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds of one zigzag period
        num = min(num, lowest * 2 - num)  # reflect to create the zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt` for the given *key*.

    Rebuilds the zigzag template to learn each rail's length, slices the
    ciphertext into rails, then reads the rails back in zigzag order.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1  # index of the bottom rail
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    # Generate the template: mark which rail each position falls on.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    # Fill in the characters: consecutive ciphertext slices become the rails.
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    # Read the rails back in zigzag order to recover the plaintext.
    output_string = ""
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every key from 1 to ``len(input_string) - 1``.

    Returns:
        Mapping of key guess -> candidate plaintext.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
0
# NOTE(review): extraction artifact — identifiers were mangled (`__A`,
# `_SCREAMING_SNAKE_CASE`, `_snake_case`, `lowerCamelCase_`, `_lowerCAmelCase`),
# so methods collide on one name, every parameter/argument was collapsed to a
# placeholder, and assignments bind `lowerCamelCase_` while later code reads
# the intended names (`cls`, `sep`, `out_vocab_file`, `mask_token`,
# `save_directory`, constants like `VOCAB_FILES_NAMES`). Tokens left
# untouched; comments document the evident intent (the fast BARThez
# tokenizer backed by a tokenizers.json file).
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # Slow-tokenizer class is unavailable without sentencepiece.
    __A : List[Any] = None

__A : Tuple = logging.get_logger(__name__)

# Expected artifact names inside a saved tokenizer directory.
__A : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

# Checkpoint name -> hosted vocabulary / tokenizer-file URLs.
__A : Optional[int] = {
    """vocab_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
        ),
    },
}

# Checkpoint name -> max model input length.
__A : Union[str, Any] = {
    """moussaKam/mbarthez""": 10_24,
    """moussaKam/barthez""": 10_24,
    """moussaKam/barthez-orangesum-title""": 10_24,
}

# SentencePiece underline (word-boundary) symbol.
__A : Optional[Any] = """▁"""


class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase):
    # Fast BARThez tokenizer; presumably subclasses PreTrainedTokenizerFast —
    # TODO confirm (base name was mangled).
    _UpperCamelCase:Any = VOCAB_FILES_NAMES
    _UpperCamelCase:List[str] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase:Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase:Optional[Any] = ['input_ids', 'attention_mask']
    _UpperCamelCase:Tuple = BarthezTokenizer  # matching slow-tokenizer class

    def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , **_SCREAMING_SNAKE_CASE , )-> int:
        # Normalize the mask token to an AddedToken (lstrip so "<mask>" absorbs
        # the preceding space), then delegate to the fast-tokenizer base class.
        lowerCamelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
        super().__init__(
            _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
        lowerCamelCase_ =vocab_file
        # Saving the slow vocabulary is only possible when we still have it.
        lowerCamelCase_ =False if not self.vocab_file else True

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
        # build_inputs_with_special_tokens (RoBERTa-style):
        # single sequence: <s> A </s>; pair: <s> A </s></s> B </s>.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        lowerCamelCase_ =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
        # create_token_type_ids_from_sequences: BARThez uses all-zero
        # token-type ids, even for sequence pairs.
        lowerCamelCase_ =[self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]:
        # save_vocabulary: copy the slow sentencepiece model into
        # `save_directory` (requires the original vocab file to be present).
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(_lowerCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCamelCase_ =os.path.join(
            _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
            copyfile(self.vocab_file , _lowerCAmelCase )
        return (out_vocab_file,)
709
from typing import Any


class Node:
    """A singly linked list node holding one arbitrary payload."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # next node in the chain, or None at the tail


class LinkedList:
    """Minimal singly linked list supporting push-to-front and data swapping.

    Fix: the original defined both classes under one colliding name
    (`_SCREAMING_SNAKE_CASE`) and all methods under another (`_snake_case`),
    while the ``__main__`` block calls ``LinkedList()``, ``push``,
    ``print_list`` and ``swap_nodes`` — all undefined as written. Method
    bodies also assigned `lowerCamelCase_` but read `temp`/`new_node`/`node_a`.
    Names restored to match those call sites and reads.
    """

    def __init__(self) -> None:
        self.head = None  # front of the list, or None when empty

    def print_list(self) -> None:
        """Print the node payloads front-to-back on a single line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert *new_data* at the front of the list (O(1))."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the payloads of the first nodes holding each value.

        A no-op when the two values are equal or either is absent.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap payloads only — the link structure is untouched.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
75
0
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) ELECTRA tokenizer based on WordPiece.

    Mirrors the slow ``ElectraTokenizer`` API; the normalizer of the backend
    tokenizer is re-built in ``__init__`` when the requested casing options
    differ from the serialized ones.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer only when the stored state disagrees
        # with the options requested here (lowercasing, accent stripping,
        # Chinese-character handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary via the backend model; return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
710
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for a YOLOS (ViT-based object detection) model.

    Stores the ViT backbone hyper-parameters, the detection-token settings,
    and the Hungarian-matcher / loss coefficients used during training.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # ViT backbone
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Detection head
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel input with dynamic batch/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model's outputs.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
75
0
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True when ``number`` is prime.

    Uses 6k ± 1 trial division: after the small cases, every prime > 3 is of
    the form 6k - 1 or 6k + 1, so only those candidates are tested up to
    sqrt(number).
    """
    if 1 < number < 4:
        # 2 and 3 are primes.
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes.
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (1-indexed).

    Project Euler problem 7 asks for the 10001st prime by default.
    """
    if nth < 1:
        raise ValueError("nth must be >= 1")
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
711
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text of ``filename`` between ``start_prompt`` and ``end_prompt``.

    Also returns the start index, end index and the full list of lines so the
    caller can splice a replacement back in.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim surrounding blank lines.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCased ``identifier`` into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center ``text`` in a cell of ``width`` characters (emoji count as width 2)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table


def check_model_table(overwrite=False):
    """Check the model table in index.md matches the library state; rewrite it when ``overwrite``."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
75
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """Configuration for a CamemBERT model (RoBERTa-style architecture).

    Holds the transformer hyper-parameters; defaults reproduce a BERT-base
    sized model.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
712
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )-> Tuple: lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =image_size lowerCamelCase_ =num_channels lowerCamelCase_ =num_stages lowerCamelCase_ =hidden_sizes lowerCamelCase_ =depths lowerCamelCase_ =is_training lowerCamelCase_ =use_labels lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =out_features lowerCamelCase_ =num_labels lowerCamelCase_ =scope lowerCamelCase_ =num_stages def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, 
self.image_size] ) lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ =self.get_config() return config, pixel_values, labels def _snake_case ( self )-> List[Any]: return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _snake_case ( self )-> Union[str, Any]: return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_SCREAMING_SNAKE_CASE , loss_ignore_index=255 , num_labels=self.num_labels , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]: lowerCamelCase_ =UperNetForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _snake_case ( self )-> str: lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) =config_and_inputs lowerCamelCase_ ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else () _UpperCamelCase:Any = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} _UpperCamelCase:Optional[Any] = False _UpperCamelCase:Dict = False _UpperCamelCase:int = False 
_UpperCamelCase:Any = False _UpperCamelCase:Optional[Any] = False _UpperCamelCase:Optional[Any] = False def _snake_case ( self )-> int: lowerCamelCase_ =UperNetModelTester(self ) lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case ( self )-> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self )-> Tuple: return def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ =[*signature.parameters.keys()] lowerCamelCase_ =["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Tuple: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def _snake_case ( self )-> str: pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def _snake_case ( self )-> str: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _snake_case ( self )-> Optional[Any]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _snake_case ( self )-> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet 
has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _snake_case ( self )-> List[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _snake_case ( self )-> str: pass def _snake_case ( self )-> Optional[int]: def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ =self.model_tester.num_stages self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ =True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =_config_zero_init(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =_config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( 
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def _snake_case ( self )-> Dict: pass @slow def _snake_case ( self )-> Tuple: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( ) ->Tuple: """simple docstring""" lowerCamelCase_ =hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) lowerCamelCase_ =Image.open(_A ).convert("""RGB""" ) return image @require_torch @require_vision @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase): def _snake_case ( self )-> List[Any]: lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =prepare_img() lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) def _snake_case ( self )-> int: lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ 
=prepare_img() lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
75
0
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """Convert a TF2 token-dropping BERT checkpoint into a PyTorch BertForMaskedLM.

    Reads each variable from the TF checkpoint, transposes kernels (TF stores
    them transposed w.r.t. PyTorch `nn.Linear`), copies them into a freshly
    initialised model, and saves the result to ``pytorch_dump_path``.
    """

    def get_masked_lm_array(name: str):
        # Variables of the MLM head live under the "masked_lm" scope.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orginal_shape):
        # Attention weights are stored per-head in TF; reshape back to the
        # flat (hidden, hidden) layout PyTorch expects before transposing.
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done sucessfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
713
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class _SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Image processor implementing the usual resize -> center-crop -> rescale -> normalize
    pipeline (LeViT-style shortest-edge resize; see `resize` below).

    Fixes vs. the previous revision: every local/attribute was assigned to the
    placeholder name ``lowerCamelCase_`` while being *read* under its real name
    (``size_dict``, ``output_size``, ``self.do_resize``, ...), so the class could
    not run. Real names are restored; behavior otherwise unchanged.
    """

    # Name(s) of the model inputs this processor produces.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`. If `size` has a "shortest_edge" key, the edge is scaled by
        256/224 first (train/eval resolution convention), otherwise explicit
        height/width are used."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict has either keys "height"/"width" or "shortest_edge".
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image,
            size=(size_dict["height"], size_dict["width"]),
            resample=resample,
            data_format=data_format,
            **kwargs,
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: "ImageInput",
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch; per-call arguments override
        the defaults captured in `__init__`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
0
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ... (n * (n + 1) / 2)."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n`` via prime factorisation.

    The divisor count is the product over prime factors of (multiplicity + 1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining n is a prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12).

    Previously all three functions shared the obfuscated name ``__UpperCamelCase``
    (each def shadowed the last) and the generator expression referenced an
    undefined ``a__``; the call sites already used the names restored here.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
714
# Imports import numpy as np class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any: self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: if red is not None: lowerCamelCase_ =red if green is not None: lowerCamelCase_ =green if blue is not None: lowerCamelCase_ =blue if red_edge is not None: lowerCamelCase_ =red_edge if nir is not None: lowerCamelCase_ =nir return True def _snake_case ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={ """ARVI2""": self.arvaa, """CCCI""": self.ccci, """CVI""": self.cvi, """GLI""": self.gli, """NDVI""": self.ndvi, """BNDVI""": self.bndvi, """redEdgeNDVI""": self.red_edge_ndvi, """GNDVI""": self.gndvi, """GBNDVI""": self.gbndvi, """GRNDVI""": self.grndvi, """RBNDVI""": self.rbndvi, """PNDVI""": self.pndvi, """ATSAVI""": self.atsavi, """BWDRVI""": self.bwdrvi, """CIgreen""": self.ci_green, """CIrededge""": self.ci_rededge, """CI""": self.ci, """CTVI""": self.ctvi, """GDVI""": self.gdvi, """EVI""": self.evi, """GEMI""": self.gemi, """GOSAVI""": self.gosavi, """GSAVI""": self.gsavi, """Hue""": self.hue, """IVI""": self.ivi, """IPVI""": self.ipvi, """I""": self.i, """RVI""": self.rvi, """MRVI""": self.mrvi, """MSAVI""": self.m_savi, """NormG""": self.norm_g, 
"""NormNIR""": self.norm_nir, """NormR""": self.norm_r, """NGRDI""": self.ngrdi, """RI""": self.ri, """S""": self.s, """IF""": self._if, """DVI""": self.dvi, """TVI""": self.tvi, """NDRE""": self.ndre, } try: return funcs[index]() except KeyError: print("""Index not in the list!""" ) return False def _snake_case ( self )-> Optional[Any]: return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case ( self )-> Tuple: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case ( self )-> str: return self.nir * (self.red / (self.green**2)) def _snake_case ( self )-> Optional[int]: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case ( self )-> Tuple: return (self.nir - self.red) / (self.nir + self.red) def _snake_case ( self )-> Dict: return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case ( self )-> List[Any]: return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case ( self )-> Tuple: return (self.nir - self.green) / (self.nir + self.green) def _snake_case ( self )-> Optional[int]: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case ( self )-> List[str]: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case ( self )-> List[str]: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case ( self )-> Optional[int]: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.0_8 , _SCREAMING_SNAKE_CASE=1.2_2 , _SCREAMING_SNAKE_CASE=0.0_3 )-> Any: return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case ( self )-> Tuple: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case 
( self )-> Any: return (self.nir / self.green) - 1 def _snake_case ( self )-> Union[str, Any]: return (self.nir / self.redEdge) - 1 def _snake_case ( self )-> Union[str, Any]: return (self.red - self.blue) / self.red def _snake_case ( self )-> Dict: lowerCamelCase_ =self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case ( self )-> int: return self.nir - self.green def _snake_case ( self )-> Dict: return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case ( self )-> List[str]: lowerCamelCase_ =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.1_6 )-> List[Any]: return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.5 )-> Dict: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case ( self )-> int: return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: return (self.nir - b) / (a * self.red) def _snake_case ( self )-> int: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case ( self )-> Optional[Any]: return (self.red + self.green + self.blue) / 3_0.5 def _snake_case ( self )-> List[str]: return self.nir / self.red def _snake_case ( self )-> List[str]: return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case ( self )-> str: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case ( self )-> List[Any]: return self.green / (self.nir + self.red + self.green) def _snake_case ( self )-> Dict: return self.nir / (self.nir + self.red + self.green) def _snake_case ( self )-> List[str]: return self.red / 
(self.nir + self.red + self.green) def _snake_case ( self )-> int: return (self.green - self.red) / (self.green + self.red) def _snake_case ( self )-> str: return (self.red - self.green) / (self.red + self.green) def _snake_case ( self )-> str: lowerCamelCase_ =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCamelCase_ =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case ( self )-> List[str]: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case ( self )-> List[Any]: return self.nir / self.red def _snake_case ( self )-> Optional[int]: return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case ( self )-> str: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
0
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield consecutive tuples of ``size`` items from ``seq``."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, split doubled letters with 'X' and pad to
    even length — the standard Playfair digraph preparation."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            # Doubled letters in a pair are illegal in Playfair; insert a filler.
            clean += "X"
    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key table as a flat 25-element list."""
    # I and J are used interchangeably to allow a 5x5 table (25 letters).
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    table = []

    # Copy key chars into the table if they are in `alphabet`, ignoring duplicates.
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # Fill the rest of the table with the remaining alphabet chars.
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``.

    Previously this file was unrunnable: duplicate ``_A`` parameters were a
    SyntaxError and the digraph/row/column locals all collided
    (``chara, chara``; ``rowa, cola`` twice). Restored per the standard algorithm.
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # Same row: take the letter to the right (wrapping).
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # Same column: take the letter below (wrapping).
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair ``ciphertext`` under ``key`` (inverse shifts of `encode`)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
715
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule -> public names it exports; consumed by _LazyModule below.
# Fix: this dict was assigned to `__A` while `_LazyModule` was called with the
# undefined name `_import_structure`, and `import sys` was unused because the
# `sys.modules[__name__]` replacement was lost.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models are only importable when torch itself is installed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule -> public names it exports; consumed by _LazyModule below.
# Fix: the dict was assigned to `__A` while `_LazyModule` received the undefined
# name `_import_structure`; `import sys` was unused because the
# `sys.modules[__name__]` replacement was lost.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models are only importable when torch itself is installed.
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
716
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='%(message)s') def __UpperCamelCase ( _A : np.ndarray ) ->np.ndarray: """simple docstring""" return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.mean(1 ) # Centralize the data of class i lowerCamelCase_ =data - column_reshape(_A ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_A , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCamelCase_ =np.dot(_A , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" lowerCamelCase_ =features.mean(1 ) lowerCamelCase_ =np.nan for i in range(_A ): lowerCamelCase_ =features[:, labels == i] lowerCamelCase_ =data.shape[1] lowerCamelCase_ =data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCamelCase_ =device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase ( _A : np.ndarray , _A : int ) ->np.ndarray: """simple docstring""" # Check if the features have been loaded if features.any(): lowerCamelCase_ =features.mean(1 ) # Center the dataset lowerCamelCase_ =features - np.reshape(_A , (data_mean.size, 1) ) lowerCamelCase_ =np.dot(_A , centered_data.T ) / features.shape[1] lowerCamelCase_ , lowerCamelCase_ =np.linalg.eigh(_A ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCamelCase_ =eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCamelCase_ =np.dot(filtered_eigenvectors.T , _A ) logging.info("""Principal Component Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int , _A : int ) ->np.ndarray: """simple docstring""" assert classes > dimensions # Check if features have been already loaded if features.any: lowerCamelCase_ , lowerCamelCase_ =eigh( covariance_between_classes(_A , _A , _A ) , covariance_within_classes(_A , _A , _A ) , ) lowerCamelCase_ =eigenvectors[:, ::-1][:, :dimensions] lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =np.linalg.svd(_A ) lowerCamelCase_ =svd_matrix[:, 0:dimensions] lowerCamelCase_ =np.dot(filtered_svd_matrix.T , _A ) logging.info("""Linear Discriminant Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_A ) logging.error("""Dataset empty""" ) raise AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" # Create dummy dataset with 2 classes and 3 features lowerCamelCase_ =np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 
5, 6, 7]] ) lowerCamelCase_ =np.array([0, 0, 0, 1, 1] ) lowerCamelCase_ =2 lowerCamelCase_ =2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_A ) as error_info: lowerCamelCase_ =linear_discriminant_analysis( _A , _A , _A , _A ) if isinstance(_A , np.ndarray ): raise AssertionError( """Did not raise AssertionError for dimensions > classes""" ) assert error_info.type is AssertionError def __UpperCamelCase ( ) ->None: """simple docstring""" lowerCamelCase_ =np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCamelCase_ =2 lowerCamelCase_ =np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(_A ) as error_info: lowerCamelCase_ =principal_component_analysis(_A , _A ) if not np.allclose(_A , _A ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
75
0
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _SCREAMING_SNAKE_CASE : _UpperCamelCase:Optional[int] = 42 _UpperCamelCase:Optional[Any] = None _UpperCamelCase:Any = None __A : List[str] = namedtuple('CoinsDistribResult', 'moves excess') def __UpperCamelCase ( _A : TreeNode | None ) ->int: """simple docstring""" if root is None: return 0 # Validation def count_nodes(_A : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_A : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_A ) != count_coins(_A ): raise ValueError("""The nodes number should be same as the number of coins""" ) # Main calculation def get_distrib(_A : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) lowerCamelCase_ =get_distrib(node.left ) lowerCamelCase_ =get_distrib(node.right ) lowerCamelCase_ =1 - left_distrib_excess lowerCamelCase_ =1 - right_distrib_excess lowerCamelCase_ =( left_distrib_moves + right_distrib_moves + abs(_A ) + abs(_A ) ) lowerCamelCase_ =node.data - coins_to_left - coins_to_right return CoinsDistribResult(_A , _A ) return get_distrib(_A )[0] if __name__ == "__main__": import doctest doctest.testmod()
717
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
# Fix: the package is `bs4`, not `bsa` (mangled import).
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Fix: locals were assigned to the placeholder `__A` but read as
    # `query`, `res` and `link` below, so the script could not run.
    # Query comes from the CLI args if given, otherwise prompt interactively.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Normal result page: first organic result lives in a div.yuRUbf.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout: href is a redirect URL whose `url` query param
        # holds the target.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
75
0
import unittest

from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bsa_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Minimal config holder consumed by the saving-test mixin."""

    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        # MarkupLMFeatureExtractor takes no configuration arguments.
        return {}


def get_html_strings():
    """Return two raw HTML documents used as fixtures.

    NOTE(review): internal whitespace of these fixtures was reconstructed from
    a collapsed source; the extractor only compares extracted node text/xpaths,
    which are unaffected.
    """
    html_string_1 = """<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # fmt: on
718
from ..utils import DummyObject, requires_backends


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder object that raises a helpful ImportError-style message via
    `requires_backends` whenever it is instantiated or its constructors are
    called without `torch` and `torchsde` installed.

    Fixes vs. the previous revision: the metaclass was the undefined name
    `lowerCAmelCase__` (must be DummyObject) and the backend list attribute had
    lost its `_backends` name, which DummyObject reads.
    """

    # Backends that must be installed before the real class can be used.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
75
0
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    # Fix: this helper was named `__UpperCamelCase` with parameter `_A` while its
    # body read `string` and the add_argument below used `type=parse_bool`.
    def parse_bool(string):
        """Parse the literal strings 'True'/'False' into bools; reject anything else."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )

    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
719
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# Result record for one scrape: total cases, total deaths, total recovered.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape worldwide COVID-19 totals from worldometers and return them as a covid_data tuple.

    The three "maincounter-number" spans on the page are, in order, cases, deaths
    and recovered — the xpath below relies on that page layout.
    """
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
75
0
"""Project Euler problem 21: sum of all amicable numbers under a limit."""
from math import isqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (all divisors excluding ``n`` itself).

    Divisor pairs (i, n // i) are collected up to isqrt(n); a perfect-square root is
    counted once. ``n`` itself enters via the pair (1, n) and is subtracted at the end.
    """
    total = 0
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            total += i
            partner = n // i
            if partner != i:  # avoid double-counting the square root
                total += partner
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``limit``.

    A number i is amicable when sum_of_divisors(sum_of_divisors(i)) == i while
    sum_of_divisors(i) != i (perfect numbers are excluded by the second condition).
    """
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
720
"""Lazy import structure for the Reformer model (configuration, tokenizers, PyTorch model)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# The configuration is importable unconditionally; everything else is gated on
# optional dependencies below.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

# Slow tokenizer needs sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

# Fast tokenizer needs the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

# Modeling code needs PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module below is used.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
0
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/validation DataLoaders for GLUE MRPC, tokenized with bert-base-cased.

    Args:
        accelerator: the Accelerator driving this run (used for process ordering
            and to pick padding strategy based on distributed type / precision).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC, automatically halving the batch size on OOM.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace with `cpu` and `mixed_precision`.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """CLI entry point: parse precision/device flags and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
721
"""The SuperGLUE benchmark metric."""

from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to their labels (assumes numpy-comparable arrays)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Return both accuracy and F1 (averaging strategy selectable via `f1_avg`)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute MultiRC metrics: per-question exact match and macro-F1, plus answer-level F1.

    Predictions are grouped by (paragraph, question) index so that exact match and
    the per-question F1 are computed over each question's full answer set.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # Exact match: every answer of this question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        """Validate the configuration name and describe the metric's input features."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Return the feature schema for the selected subset (record/multirc are structured)."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        """Dispatch to the right scoring routine based on the configured subset."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            # record expects the SQuAD-style nested structure used by record_evaluation.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
75
0
"""Convert Bark checkpoint (suno/bark) into the Hugging Face transformers format."""
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


# Mapping from original (suno) parameter-name fragments to HF names.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub location of each original checkpoint (full and "small" variants).
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the original checkpoint for `model_type`."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    """Download one checkpoint file from the Hub into the local cache directory."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load an original Bark sub-model checkpoint into the matching HF model class.

    Downloads the checkpoint if missing, renames state-dict keys per
    `new_layer_name_dict`, and verifies no unexpected/missing keys remain
    (the `.attn.bias` buffers are intentionally ignored).
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, sanity-check it against the original, and save it.

    The HF model's output on a random input must match the original implementation
    within 1e-3 before the converted weights are written to `pytorch_dump_folder_path`.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]

    else:
        prediction_codeboook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codeboook_channel, vec)
        output_old_model = bark_model(prediction_codeboook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble the three converted sub-models plus Encodec into one BarkModel and save it."""
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
700
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# Module-level logger; the class body below relies on the name ``logger``.
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    """Configuration class for the DETA object-detection model.

    Stores the hyper-parameters used to instantiate a DETA model.  When no
    ``backbone_config`` is given, a default ResNet backbone config is created.
    """

    # ``model_type`` and ``attribute_map`` are the exact attribute names the
    # PretrainedConfig machinery looks up -- they must not be renamed.
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            # Accept either a ready config object or a plain dict describing one.
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable-attention attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias for ``encoder_attention_heads`` (see ``attribute_map``)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for ``d_model`` (see ``attribute_map``)."""
        return self.d_model

    def to_dict(self):
        """Serialize this config (including the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
75
0
from PIL import Image def __UpperCamelCase ( _A : Union[str, Any] ) ->str: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ =image.size lowerCamelCase_ =0 lowerCamelCase_ =image.load() for i in range(_A ): for j in range(_A ): lowerCamelCase_ =pixels[j, i] mean += pixel mean //= width * height for j in range(_A ): for i in range(_A ): lowerCamelCase_ =255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __A : Dict = mean_threshold(Image.open('path_to_image').convert('L')) image.save('output_image_path')
701
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


# Checkpoint name -> hosted config URL.
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """ALBERT model configuration.

    Stores the hyper-parameters used to instantiate an ALBERT model; the
    defaults correspond to the large ``albert-xxlarge-v2``-style architecture.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
75
0
def __UpperCamelCase ( _A : str ) ->list: """simple docstring""" return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(SCREAMING_SNAKE_CASE_ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__('doctest').testmod()
702
from collections import deque from math import floor from random import random from time import time class _SCREAMING_SNAKE_CASE : def __init__( self )-> List[str]: lowerCamelCase_ ={} def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[Any]: if self.graph.get(_SCREAMING_SNAKE_CASE ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: lowerCamelCase_ =[[w, v]] if not self.graph.get(_SCREAMING_SNAKE_CASE ): lowerCamelCase_ =[] def _snake_case ( self )-> str: return list(self.graph ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict: if self.graph.get(_SCREAMING_SNAKE_CASE ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> Optional[Any]: if s == d: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_SCREAMING_SNAKE_CASE ) return visited else: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]: if c == -1: lowerCamelCase_ =floor(random() * 1_0000 ) + 10 for i in range(_SCREAMING_SNAKE_CASE ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): lowerCamelCase_ =floor(random() * c ) + 1 if n != i: 
self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Any: lowerCamelCase_ =deque() lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] d.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) while d: lowerCamelCase_ =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]: lowerCamelCase_ =0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]: return len(self.graph[u] ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Union[str, Any]: lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =[] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return sorted_nodes def _snake_case ( self )-> str: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( 
visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return list(_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Tuple: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have 
reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return False def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> List[str]: lowerCamelCase_ =time() self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]: lowerCamelCase_ =time() self.bfs(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin class _SCREAMING_SNAKE_CASE : def __init__( self )-> Optional[Any]: lowerCamelCase_ ={} def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[str]: # check if the u exists if self.graph.get(_SCREAMING_SNAKE_CASE ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist lowerCamelCase_ =[[w, v]] # add the other way if self.graph.get(_SCREAMING_SNAKE_CASE ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist lowerCamelCase_ =[[w, u]] def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: if self.graph.get(_SCREAMING_SNAKE_CASE ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_SCREAMING_SNAKE_CASE ) # the other way round if self.graph.get(_SCREAMING_SNAKE_CASE ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> int: if s == d: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_SCREAMING_SNAKE_CASE ) return visited else: stack.append(node[1] ) 
visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]: if c == -1: lowerCamelCase_ =floor(random() * 1_0000 ) + 10 for i in range(_SCREAMING_SNAKE_CASE ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): lowerCamelCase_ =floor(random() * c ) + 1 if n != i: self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]: lowerCamelCase_ =deque() lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] d.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) while d: lowerCamelCase_ =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]: return len(self.graph[u] ) def _snake_case ( self )-> Any: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: 
stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return list(_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Any: lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(_SCREAMING_SNAKE_CASE ) visited.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(_SCREAMING_SNAKE_CASE ) != 0: lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1] else: lowerCamelCase_ =False indirect_parents.append(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(_SCREAMING_SNAKE_CASE ) == 0: return False def _snake_case ( self )-> Optional[Any]: return list(self.graph ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> str: lowerCamelCase_ =time() self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin def 
_snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Dict: lowerCamelCase_ =time() self.bfs(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =time() return end - begin
75
0
import math def __UpperCamelCase ( _A : Optional[Any] ) ->bool: """simple docstring""" return math.sqrt(SCREAMING_SNAKE_CASE_ ) * math.sqrt(SCREAMING_SNAKE_CASE_ ) == num def __UpperCamelCase ( _A : Tuple ) ->bool: """simple docstring""" lowerCamelCase_ =0 lowerCamelCase_ =n while left <= right: lowerCamelCase_ =(left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowerCamelCase_ =mid - 1 else: lowerCamelCase_ =mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
703
import os from datetime import datetime as dt from github import Github __A : Optional[int] = [ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def __UpperCamelCase ( ) ->Dict: """simple docstring""" lowerCamelCase_ =Github(os.environ["""GITHUB_TOKEN"""] ) lowerCamelCase_ =g.get_repo("""huggingface/diffusers""" ) lowerCamelCase_ =repo.get_issues(state="""open""" ) for issue in open_issues: lowerCamelCase_ =sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A ) lowerCamelCase_ =comments[0] if len(_A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
75
0
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Materialize a fine-pruned model by applying its learned masks.

    Loads the ``pytorch_model.bin`` state dict from ``args.model_name_or_path``,
    multiplies each prunable weight by the binarized mask implied by
    ``args.pruning_method``/``args.threshold``, and saves the result under
    ``args.target_model_path`` (defaulting to a ``bertarized_*`` sibling folder).
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip "weight" to locate the paired mask scores
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
704
import argparse import os import re __A : Optional[Any] = 'src/diffusers' # Pattern that looks at the indentation in a line. __A : int = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. __A : Dict = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __A : Optional[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. __A : int = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __A : Optional[Any] = re.compile(R'\[([^\]]+)\]') def __UpperCamelCase ( _A : int ) ->Dict: """simple docstring""" lowerCamelCase_ =_re_indent.search(_A ) return "" if search is None else search.groups()[0] def __UpperCamelCase ( _A : Optional[Any] , _A : Optional[int]="" , _A : int=None , _A : List[str]=None ) ->List[Any]: """simple docstring""" lowerCamelCase_ =0 lowerCamelCase_ =code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 lowerCamelCase_ =["""\n""".join(lines[:index] )] else: lowerCamelCase_ =[] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCamelCase_ =[lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(_A ) ) if index < len(_A ) - 1: lowerCamelCase_ =[lines[index + 1]] index += 1 else: lowerCamelCase_ =[] else: blocks.append("""\n""".join(_A ) ) lowerCamelCase_ =[lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append("""\n""".join(_A ) ) # Add final block after end_prompt if provided. 
if end_prompt is not None and index < len(_A ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def __UpperCamelCase ( _A : Optional[int] ) ->Optional[int]: """simple docstring""" def _inner(_A : Optional[Any] ): return key(_A ).lower().replace("""_""" , """""" ) return _inner def __UpperCamelCase ( _A : int , _A : List[Any]=None ) ->List[str]: """simple docstring""" # If no key is provided, we use a noop. def noop(_A : List[str] ): return x if key is None: lowerCamelCase_ =noop # Constants are all uppercase, they go first. lowerCamelCase_ =[obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCamelCase_ =[obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. lowerCamelCase_ =[obj for obj in objects if not key(_A )[0].isupper()] lowerCamelCase_ =ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def __UpperCamelCase ( _A : List[str] ) ->List[str]: """simple docstring""" # This inner function sort imports between [ ]. def _replace(_A : Optional[Any] ): lowerCamelCase_ =match.groups()[0] if "," not in imports: return f'[{imports}]' lowerCamelCase_ =[part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase_ =keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" lowerCamelCase_ =import_statement.split("""\n""" ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. 
lowerCamelCase_ =2 if lines[1].strip() == """[""" else 1 lowerCamelCase_ =[(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCamelCase_ =sort_objects(_A , key=lambda _A : x[1] ) lowerCamelCase_ =[lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCamelCase_ =_re_bracket_content.sub(_replace , lines[1] ) else: lowerCamelCase_ =[part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase_ =keys[:-1] lowerCamelCase_ =get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line lowerCamelCase_ =_re_bracket_content.sub(_replace , _A ) return import_statement def __UpperCamelCase ( _A : List[Any] , _A : Optional[Any]=True ) ->str: """simple docstring""" with open(_A , """r""" ) as f: lowerCamelCase_ =f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCamelCase_ =split_code_in_indented_blocks( _A , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCamelCase_ =main_blocks[block_idx] lowerCamelCase_ =block.split("""\n""" ) # Get to the start of the imports. 
lowerCamelCase_ =0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCamelCase_ =len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. lowerCamelCase_ ="""\n""".join(block_lines[line_idx:-1] ) lowerCamelCase_ =get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCamelCase_ =split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCamelCase_ =_re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCamelCase_ =[(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCamelCase_ =[(i, key) for i, key in enumerate(_A ) if key is not None] lowerCamelCase_ =[x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCamelCase_ =0 lowerCamelCase_ =[] for i in range(len(_A ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowerCamelCase_ =sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. lowerCamelCase_ ="""\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' 
) with open(_A , """w""" ) as f: f.write("""\n""".join(_A ) ) def __UpperCamelCase ( _A : str=True ) ->List[Any]: """simple docstring""" lowerCamelCase_ =[] for root, _, files in os.walk(_A ): if "__init__.py" in files: lowerCamelCase_ =sort_imports(os.path.join(_A , """__init__.py""" ) , check_only=_A ) if result: lowerCamelCase_ =[os.path.join(_A , """__init__.py""" )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __A : Optional[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
75
0
def __UpperCamelCase ( _A : int = 10 ) ->str:
    """Project Euler 97: last ``_A`` digits of the non-Mersenne prime
    28433 * 2**7830457 + 1.

    Args:
        _A: number of trailing digits to return (non-negative int).

    Returns:
        The trailing digits as a decimal string.

    Raises:
        ValueError: if ``_A`` is not a non-negative integer.
    """
    if not isinstance(_A , int ) or _A < 0:
        raise ValueError("""Invalid input""" )
    # Work modulo 10**_A: three-argument pow() does modular exponentiation,
    # so the full 2.3-million-digit number is never materialised.
    modulus = 10**_A
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )


# The __main__ guard (and external callers) refer to this as ``solution``;
# the alias keeps both names working.
solution = __UpperCamelCase


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"""{solution(10) = }""")
705
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : str = {'vocab_file': 'sentencepiece.model'} __A : Optional[Any] = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } __A : int = { 'google/rembert': 2_56, } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:List[Any] = VOCAB_FILES_NAMES _UpperCamelCase:Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase:Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , )-> str: super().__init__( do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) lowerCamelCase_ =do_lower_case lowerCamelCase_ =remove_space lowerCamelCase_ =keep_accents lowerCamelCase_ =vocab_file lowerCamelCase_ =spm.SentencePieceProcessor() self.sp_model.Load(_SCREAMING_SNAKE_CASE ) @property def _snake_case ( self )-> Dict: return len(self.sp_model ) def _snake_case ( self )-> Optional[int]: lowerCamelCase_ ={self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self )-> Optional[Any]: 
lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =None return state def __setstate__( self , _SCREAMING_SNAKE_CASE )-> Optional[Any]: lowerCamelCase_ =d lowerCamelCase_ =spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Union[str, Any]: lowerCamelCase_ =self.sp_model.EncodeAsPieces(_SCREAMING_SNAKE_CASE ) return pieces def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]: return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]: return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ =self.sp_model.decode_pieces(_SCREAMING_SNAKE_CASE ) return out_string def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]: lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )-> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]: lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + 
sep ) * [1] def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""Vocabulary path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return lowerCamelCase_ =os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
75
0
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional-knapsack: maximise profit for a weight budget.

    Items may be taken fractionally; the item with the best profit/weight
    ratio is always taken first.

    Args:
        profit: profit of each item (non-negative numbers).
        weight: weight of each item, parallel to ``profit``.
        max_weight: total carrying capacity (must be > 0).

    Returns:
        The maximum achievable profit (int or float).

    Raises:
        ValueError: on mismatched lengths, non-positive capacity, or
            negative profits/weights.
    """
    if len(profit) != len(weight):
        raise ValueError("""The length of profit and weight must be same.""")
    if max_weight <= 0:
        raise ValueError("""max_weight must greater than zero.""")
    if any(p < 0 for p in profit):
        raise ValueError("""Profit can not be negative.""")
    if any(w < 0 for w in weight):
        raise ValueError("""Weight can not be negative.""")

    # Profit gained per 1 kg for each item.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Sorted copy used to pick the best remaining ratio each round.
    sorted_profit_by_weight = sorted(profit_by_weight)

    length = len(sorted_profit_by_weight)
    limit = 0  # weight carried so far
    gain = 0  # profit accumulated so far
    i = 0
    # Loop until the capacity is reached or every item is considered.
    while limit <= max_weight and i < length:
        # Best remaining profit/weight ratio; mark it consumed with -1 so
        # .index() never finds it again.
        biggest_ratio = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_ratio)
        profit_by_weight[index] = -1

        if max_weight - limit >= weight[index]:
            # Whole item fits: take all of it.
            limit += weight[index]
            gain += 1 * profit[index]
        else:
            # Only part fits: take the fraction that fills the remainder.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        'Input profits, weights, and then max_weight (all positive ints) separated by '
        'spaces.'
    )

    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))

    # Function Call
    calc_profit(profit, weight, max_weight)
706
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        """gelu_python must match torch's builtin gelu but not the tanh-approximated gelu_new."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("""gelu""")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        """gelu_10 behaves like gelu below the clip value and saturates at 10.0."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("""gelu""")
        gelu10 = get_activation("""gelu_10""")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        # Mask out the clipped region, then compare the unclipped values.
        clipped_mask = torch.where(y_gelu_10 < 1_0.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 1_0.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        """Every registered activation name resolves; unknown names raise KeyError."""
        get_activation("""gelu""")
        get_activation("""gelu_10""")
        get_activation("""gelu_fast""")
        get_activation("""gelu_new""")
        get_activation("""gelu_python""")
        get_activation("""gelu_pytorch_tanh""")
        get_activation("""linear""")
        get_activation("""mish""")
        get_activation("""quick_gelu""")
        get_activation("""relu""")
        get_activation("""sigmoid""")
        get_activation("""silu""")
        get_activation("""swish""")
        get_activation("""tanh""")
        # BUGFIX: the garbled version passed an undefined placeholder to
        # assertRaises; unknown/None lookups raise KeyError.
        with self.assertRaises(KeyError):
            get_activation("""bogus""")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        """get_activation must return a fresh object per call, so attributes
        set on one instance do not leak onto another."""
        act1 = get_activation("""gelu""")
        act1.a = 1
        act2 = get_activation("""gelu""")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
75
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Framework-independent pieces; extended below with the model classes for
# each framework that is actually installed.
# BUGFIX: the garbled version assigned these to throwaway `__A` names and
# never built the `_import_structure` dict that _LazyModule consumes.
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUGFIX: the Flax classes live in modeling_flax_wav2vec2; the
        # garbled version imported them from the TF module a second time.
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    # BUGFIX: the lazy module must replace this module in sys.modules, not
    # be bound to a throwaway variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    """Ask which compute environment is in use and run the matching questionnaire."""
    compute_environment = _ask_options(
        """In which compute environment are you running?""",
        ["""This machine""", """AWS (Amazon SageMaker)"""],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build (or register under ``subparsers``) the `accelerate config` CLI parser."""
    if subparsers is not None:
        parser = subparsers.add_parser("""config""", description=description)
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""", description=description)

    parser.add_argument(
        """--config_file""",
        default=None,
        help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the questionnaire and save the resulting config as JSON or YAML."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # BUGFIX: ensure the *cache directory* exists before writing the
        # default file (the garbled version passed the args namespace to
        # os.path.isdir / os.makedirs).
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(""".json"""):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')


def main():
    """CLI entry point: parse arguments and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
75
0
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE=[2, 3, 4] , _SCREAMING_SNAKE_CASE=None , )-> str: lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =image_size lowerCamelCase_ =num_channels lowerCamelCase_ =num_stages lowerCamelCase_ =hidden_sizes lowerCamelCase_ =depths lowerCamelCase_ =is_training lowerCamelCase_ =use_labels lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =num_labels lowerCamelCase_ =initializer_range lowerCamelCase_ =out_features lowerCamelCase_ =out_indices lowerCamelCase_ =scope def _snake_case ( self )-> str: 
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase_ =self.get_config() return config, pixel_values, labels def _snake_case ( self )-> Any: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[Any]: lowerCamelCase_ =ConvNextVaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]: lowerCamelCase_ =ConvNextVaForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]: lowerCamelCase_ =ConvNextVaBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, 
self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase_ =None lowerCamelCase_ =ConvNextVaBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _snake_case ( self )-> Any: lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={"""pixel_values""": pixel_values} return config, inputs_dict def _snake_case ( self )-> Optional[int]: lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCamelCase:str = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCamelCase:Optional[int] = False _UpperCamelCase:List[str] = False _UpperCamelCase:List[str] = False _UpperCamelCase:Union[str, Any] = False _UpperCamelCase:str = False def _snake_case ( self )-> Any: lowerCamelCase_ =ConvNextVaModelTester(self ) lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , 
has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case ( self )-> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self )-> Union[str, Any]: return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def _snake_case ( self )-> Union[str, Any]: pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def _snake_case ( self )-> Optional[int]: pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def _snake_case ( self )-> Optional[int]: pass def _snake_case ( self )-> List[str]: if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_with_labels() lowerCamelCase_ =True if model_class.__name__ in [ *get_values(_SCREAMING_SNAKE_CASE ), *get_values(_SCREAMING_SNAKE_CASE ), ]: continue lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.train() lowerCamelCase_ =self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ).loss loss.backward() def _snake_case ( self )-> List[str]: if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_with_labels() lowerCamelCase_ =False lowerCamelCase_ =True if ( model_class.__name__ in [*get_values(_SCREAMING_SNAKE_CASE ), *get_values(_SCREAMING_SNAKE_CASE )] or not model_class.supports_gradient_checkpointing 
): continue lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.train() lowerCamelCase_ =self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ).loss loss.backward() def _snake_case ( self )-> Dict: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ =[*signature.parameters.keys()] lowerCamelCase_ =["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Optional[int]: def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ =self.model_tester.num_stages self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =True 
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ =True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Optional[int]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self )-> Union[str, Any]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =ConvNextVaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( ) ->Optional[Any]: """simple docstring""" lowerCamelCase_ =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase): @cached_property def _snake_case ( self )-> str: return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def _snake_case ( self )-> List[str]: lowerCamelCase_ =ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.default_image_processor lowerCamelCase_ =prepare_img() lowerCamelCase_ =preprocessor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE ) # verify the logits lowerCamelCase_ =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
708
def encrypt(input_string: str, key: int) -> str:
    """Rail-fence encrypt: write ``input_string`` down a zig-zag of ``key``
    rails, then read the rails off row by row.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1 or len(input_string) <= key:
        # A single rail (or a string shorter than the fence) is unchanged.
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["""""".join(row) for row in temp_grid]
    output_string = """""".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt`: rebuild the zig-zag template, fill it with the
    ciphertext row by row, then read it back in zig-zag order.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("""*""")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = """"""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt ``input_string`` with every possible key and return the
    mapping ``key -> candidate plaintext``."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
0
def __lowercase ( _A : int ) ->bool:
    """Return True iff ``_A`` is a perfect number, i.e. equals the sum of
    its proper divisors (e.g. 6 = 1 + 2 + 3).

    Only divisors up to _A // 2 need checking: no proper divisor is larger.
    """
    return sum(i for i in range(1 , _A // 2 + 1 ) if _A % i == 0 ) == _A


# The __main__ guard below refers to this predicate as ``perfect``; the
# alias makes both names resolve.
perfect = __lowercase


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
709
from typing import Any


class Node:
    """A singly-linked-list node holding an arbitrary payload."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    """Minimal singly linked list supporting front insertion and data swap."""

    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        """Print every node's data on one space-separated line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=""" """)
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert ``new_data`` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the *data* of the first nodes holding the two values.

        No-op when the values are equal or either value is absent.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap payloads only; the node links stay untouched.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
75
0
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __UpperCamelCase ( _A : List[Any] , _A : int , _A : Dict ) ->Optional[int]: """simple docstring""" lowerCamelCase_ =AutoConfig.from_pretrained(_A ) lowerCamelCase_ =FlaxAutoModelForSeqaSeqLM.from_config(config=_A ) lowerCamelCase_ =checkpoints.load_tax_checkpoint(_A ) lowerCamelCase_ ='wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": lowerCamelCase_ ='SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase_ ='LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ ='TransientGlobalSelfAttention' else: raise ValueError( """Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`""" """ attribute with a value from [\'local\', \'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase_ =f'layers_{str(_A )}' # Self-Attention lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: 
lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization lowerCamelCase_ =tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning lowerCamelCase_ =flax_model.params['encoder']['block'][str(_A )]['layer'] lowerCamelCase_ =tax_attention_key lowerCamelCase_ =tax_attention_out lowerCamelCase_ =tax_attention_query lowerCamelCase_ =tax_attention_value lowerCamelCase_ =tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ =tax_global_layer_norm if split_mlp_wi: lowerCamelCase_ =tax_mlp_wi_a lowerCamelCase_ =tax_mlp_wi_a else: lowerCamelCase_ =tax_mlp_wi lowerCamelCase_ =tax_mlp_wo lowerCamelCase_ =tax_mlp_layer_norm lowerCamelCase_ =flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase_ =tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T lowerCamelCase_ =tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase_ =tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T lowerCamelCase_ =tax_encoder_global_rel_embedding # Assigning lowerCamelCase_ =tax_model['target']['encoder']['encoder_norm']['scale'] lowerCamelCase_ =tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase_ =f'layers_{str(_A )}' # Self-Attention lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization 
lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] lowerCamelCase_ =tax_enc_dec_attention_module['key']['kernel'] lowerCamelCase_ =tax_enc_dec_attention_module['out']['kernel'] lowerCamelCase_ =tax_enc_dec_attention_module['query']['kernel'] lowerCamelCase_ =tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization lowerCamelCase_ =tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning lowerCamelCase_ =flax_model.params['decoder']['block'][str(_A )]['layer'] lowerCamelCase_ =tax_attention_key lowerCamelCase_ =tax_attention_out lowerCamelCase_ =tax_attention_query lowerCamelCase_ =tax_attention_value lowerCamelCase_ =tax_pre_attention_layer_norm lowerCamelCase_ =tax_enc_dec_attention_key lowerCamelCase_ =tax_enc_dec_attention_out lowerCamelCase_ =tax_enc_dec_attention_query lowerCamelCase_ =tax_enc_dec_attention_value lowerCamelCase_ =tax_cross_layer_norm if split_mlp_wi: lowerCamelCase_ =tax_mlp_wi_a lowerCamelCase_ =tax_mlp_wi_a else: lowerCamelCase_ =tax_mlp_wi lowerCamelCase_ =tax_mlp_wo lowerCamelCase_ =txa_mlp_layer_norm lowerCamelCase_ =flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase_ =tax_model['target']['decoder']['decoder_norm']['scale'] lowerCamelCase_ =txa_decoder_norm # Only for layer 0: lowerCamelCase_ 
=tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T lowerCamelCase_ =tax_decoder_rel_embedding # Token Embeddings lowerCamelCase_ =tax_model['target']['token_embedder']['embedding'] lowerCamelCase_ =txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowerCamelCase_ =tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(_A ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) __A : List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
710
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Dict = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Optional[Any] = "yolos" def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=[512, 864] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , )-> Tuple: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =image_size lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =qkv_bias lowerCamelCase_ =num_detection_tokens lowerCamelCase_ =use_mid_position_embeddings lowerCamelCase_ =auxiliary_loss # Hungarian matcher lowerCamelCase_ =class_cost lowerCamelCase_ =bbox_cost lowerCamelCase_ =giou_cost # Loss coefficients lowerCamelCase_ =bbox_loss_coefficient lowerCamelCase_ =giou_loss_coefficient lowerCamelCase_ 
=eos_coefficient class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Optional[Any] = version.parse("1.11") @property def _snake_case ( self )-> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _snake_case ( self )-> float: return 1E-4 @property def _snake_case ( self )-> int: return 12
75
0
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __A : List[Any] = logging.get_logger(__name__) __A : Union[str, Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __A : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __UpperCamelCase ( _A : Optional[int] , _A : List[str] , _A : Tuple , _A : Dict , _A : List[str] ) ->Tuple: """simple docstring""" for attribute in key.split(""".""" ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) if weight_type is not None: lowerCamelCase_ =getattr(__snake_case , __snake_case ).shape else: lowerCamelCase_ =hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": lowerCamelCase_ =value elif weight_type == "weight_g": lowerCamelCase_ =value elif weight_type == "weight_v": lowerCamelCase_ =value elif weight_type == "bias": lowerCamelCase_ =value else: lowerCamelCase_ =value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def __UpperCamelCase ( _A : List[str] , _A : Tuple ) ->List[str]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =fairseq_model.state_dict() lowerCamelCase_ =hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight lowerCamelCase_ =None for name, value in fairseq_dict.items(): lowerCamelCase_ =False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == """group""" , ) lowerCamelCase_ =True elif name.split(""".""" )[0] == "proj": lowerCamelCase_ =fairseq_model.proj lowerCamelCase_ =True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCamelCase_ =True if "*" in mapped_key: lowerCamelCase_ =name.split(__snake_case )[0].split(""".""" )[-2] lowerCamelCase_ =mapped_key.replace("""*""" , __snake_case ) if "weight_g" in name: lowerCamelCase_ ="""weight_g""" elif "weight_v" in name: lowerCamelCase_ ="""weight_v""" elif "bias" in name: lowerCamelCase_ ="""bias""" elif "weight" in name: lowerCamelCase_ ="""weight""" else: lowerCamelCase_ =None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) return proj_weight def __UpperCamelCase ( _A : Any , _A : int , _A : Tuple , _A : Union[str, Any] , _A : Dict ) ->int: """simple docstring""" lowerCamelCase_ 
=full_name.split("""conv_layers.""" )[-1] lowerCamelCase_ =name.split(""".""" ) lowerCamelCase_ =int(items[0] ) lowerCamelCase_ =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) lowerCamelCase_ =value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) lowerCamelCase_ =value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) lowerCamelCase_ =value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) lowerCamelCase_ =value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__snake_case ) def __UpperCamelCase ( _A : Optional[int] ) ->Any: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def __UpperCamelCase ( _A : str ) ->str: """simple docstring""" with open(__snake_case , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase_ =f.readlines() lowerCamelCase_ =[line.split(""" """ )[0] for line in lines] lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ ={ """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __UpperCamelCase ( _A : List[Any] , _A : List[str] , _A : List[Any] , _A : Tuple , _A : Union[str, Any] , _A : str , _A : Union[str, Any] , ) ->List[Any]: """simple docstring""" lowerCamelCase_ =WavaVecaConfig.from_pretrained(__snake_case ) lowerCamelCase_ =SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) lowerCamelCase_ =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) lowerCamelCase_ =model[0].eval() # set weights for wav2vec2 encoder lowerCamelCase_ =WavaVecaModel(__snake_case ) lowerCamelCase_ =recursively_load_weights_wavaveca(model.encoder , __snake_case ) lowerCamelCase_ =SpeechaTextaForCausalLM(__snake_case ) lowerCamelCase_ , lowerCamelCase_ =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove("""embed_out""" ) lowerCamelCase_ 
=nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) lowerCamelCase_ =SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) lowerCamelCase_ =False # add projection layer lowerCamelCase_ =nn.Parameter(projection_layer.weight ) lowerCamelCase_ =nn.Parameter(projection_layer.bias ) lowerCamelCase_ =create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , """vocab.json""" ) , """w""" ) as fp: json.dump(__snake_case , __snake_case ) lowerCamelCase_ =SpeechaTextaTokenizer(os.path.join(__snake_case , """vocab.json""" ) ) tokenizer.save_pretrained(__snake_case ) lowerCamelCase_ =hf_wavavec.config.to_dict() lowerCamelCase_ =tokenizer.pad_token_id lowerCamelCase_ =tokenizer.bos_token_id lowerCamelCase_ =tokenizer.eos_token_id lowerCamelCase_ ="""speech_to_text_2""" lowerCamelCase_ ="""wav2vec2""" lowerCamelCase_ =SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', 
default=1_02_24, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __A : List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
711
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py __A : List[Any] = 'src/transformers' __A : Tuple = 'docs/source/en' __A : Optional[int] = '.' def __UpperCamelCase ( _A : Tuple , _A : Tuple , _A : Optional[Any] ) ->Optional[Any]: """simple docstring""" with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase_ =f.readlines() # Find the start prompt. lowerCamelCase_ =0 while not lines[start_index].startswith(_A ): start_index += 1 start_index += 1 lowerCamelCase_ =start_index while not lines[end_index].startswith(_A ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | __A : Dict = 'Model|Encoder|Decoder|ForConditionalGeneration' # Regexes that match TF/Flax/PT model names. __A : Optional[int] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') __A : Optional[int] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __A : str = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # This is to make sure the transformers module imported is the one in the repo. 
__A : List[Any] = direct_transformers_import(TRANSFORMERS_PATH) def __UpperCamelCase ( _A : List[Any] ) ->str: """simple docstring""" lowerCamelCase_ =re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , _A ) return [m.group(0 ) for m in matches] def __UpperCamelCase ( _A : Union[str, Any] , _A : List[str] ) ->Optional[int]: """simple docstring""" lowerCamelCase_ =2 if text == """✅""" or text == """❌""" else len(_A ) lowerCamelCase_ =(width - text_length) // 2 lowerCamelCase_ =width - text_length - left_indent return " " * left_indent + text + " " * right_indent def __UpperCamelCase ( ) ->Any: """simple docstring""" lowerCamelCase_ =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowerCamelCase_ ={ name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowerCamelCase_ ={name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) lowerCamelCase_ =collections.defaultdict(_A ) # Let's lookup through all transformers object (once). 
for attr_name in dir(_A ): lowerCamelCase_ =None if attr_name.endswith("""Tokenizer""" ): lowerCamelCase_ =slow_tokenizers lowerCamelCase_ =attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowerCamelCase_ =fast_tokenizers lowerCamelCase_ =attr_name[:-13] elif _re_tf_models.match(_A ) is not None: lowerCamelCase_ =tf_models lowerCamelCase_ =_re_tf_models.match(_A ).groups()[0] elif _re_flax_models.match(_A ) is not None: lowerCamelCase_ =flax_models lowerCamelCase_ =_re_flax_models.match(_A ).groups()[0] elif _re_pt_models.match(_A ) is not None: lowerCamelCase_ =pt_models lowerCamelCase_ =_re_pt_models.match(_A ).groups()[0] if lookup_dict is not None: while len(_A ) > 0: if attr_name in model_name_to_prefix.values(): lowerCamelCase_ =True break # Try again after removing the last word in the name lowerCamelCase_ ="""""".join(camel_case_split(_A )[:-1] ) # Let's build that table! lowerCamelCase_ =list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowerCamelCase_ =["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). 
lowerCamelCase_ =[len(_A ) + 2 for c in columns] lowerCamelCase_ =max([len(_A ) for name in model_names] ) + 2 # Build the table per se lowerCamelCase_ ="""|""" + """|""".join([_center_text(_A , _A ) for c, w in zip(_A , _A )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowerCamelCase_ ={True: """✅""", False: """❌"""} for name in model_names: lowerCamelCase_ =model_name_to_prefix[name] lowerCamelCase_ =[ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(_A , _A ) for l, w in zip(_A , _A )] ) + "|\n" return table def __UpperCamelCase ( _A : str=False ) ->Optional[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =_find_text_in_file( filename=os.path.join(_A , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowerCamelCase_ =get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(_A , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": __A : int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __A : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
75
0
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , )-> str: lowerCamelCase_ =size if size is not None else {"""height""": 18, """width""": 18} lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =min_resolution lowerCamelCase_ =max_resolution lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =do_normalize def _snake_case ( self )-> Union[str, Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4], [-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( lowercase_ , unittest.TestCase): _UpperCamelCase:Optional[int] = ImageGPTImageProcessor if is_vision_available() else None def _snake_case ( self )-> int: lowerCamelCase_ =ImageGPTImageProcessingTester(self ) @property def _snake_case ( self )-> int: return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self )-> int: lowerCamelCase_ 
=self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """clusters""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) def _snake_case ( self )-> str: lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) lowerCamelCase_ =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowerCamelCase_ ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ =os.path.join(lowerCamelCase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowerCamelCase_ ) lowerCamelCase_ =self.image_processing_class.from_json_file(lowerCamelCase_ ).to_dict() lowerCamelCase_ =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCamelCase_ ) def _snake_case ( self )-> Any: lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCamelCase_ ) lowerCamelCase_ =self.image_processing_class.from_pretrained(lowerCamelCase_ ).to_dict() 
lowerCamelCase_ =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCamelCase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def _snake_case ( self )-> Union[str, Any]: pass def __UpperCamelCase ( ) ->int: """simple docstring""" lowerCamelCase_ =load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowerCamelCase_ =Image.open(dataset[4]["""file"""] ) lowerCamelCase_ =Image.open(dataset[5]["""file"""] ) lowerCamelCase_ =[imagea, imagea] return images @require_vision @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase): @slow def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowerCamelCase_ =prepare_images() # test non-batched lowerCamelCase_ =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowerCamelCase_ =[306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase_ ) # test batched lowerCamelCase_ =image_processing(lowerCamelCase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowerCamelCase_ =[303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase_ )
712
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class _SCREAMING_SNAKE_CASE :
    # Model tester: builds a tiny ConvNext-backboned UperNet config plus random
    # pixel inputs for the unit tests below.

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )-> Tuple:
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =num_stages
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =depths
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =type_sequence_label_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =out_features
        lowerCamelCase_ =num_labels
        lowerCamelCase_ =scope
        lowerCamelCase_ =num_stages

    def _snake_case ( self )-> Union[str, Any]:
        # Random pixel values (and labels when use_labels is set).
        lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ =None
        if self.use_labels:
            lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase_ =self.get_config()
        return config, pixel_values, labels

    def _snake_case ( self )-> List[Any]:
        # Tiny ConvNext backbone configuration.
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def _snake_case ( self )-> Union[str, Any]:
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_SCREAMING_SNAKE_CASE , loss_ignore_index=255 , num_labels=self.num_labels , )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
        # Forward pass must yield per-pixel logits at full input resolution.
        lowerCamelCase_ =UperNetForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def _snake_case ( self )-> str:
        lowerCamelCase_ =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) ,
        ) =config_and_inputs
        lowerCamelCase_ ={"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
    # Common model tests for UperNetForSemanticSegmentation.

    _UpperCamelCase:Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _UpperCamelCase:Any = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    _UpperCamelCase:Optional[Any] = False
    _UpperCamelCase:Dict = False
    _UpperCamelCase:int = False
    _UpperCamelCase:Any = False
    _UpperCamelCase:Optional[Any] = False
    _UpperCamelCase:Optional[Any] = False

    def _snake_case ( self )-> int:
        lowerCamelCase_ =UperNetModelTester(self )
        lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )

    def _snake_case ( self )-> Union[str, Any]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _snake_case ( self )-> Tuple:
        return

    def _snake_case ( self )-> Union[str, Any]:
        # forward() signature must begin with `pixel_values`.
        lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ =[*signature.parameters.keys()]
            lowerCamelCase_ =["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Tuple:
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )

    @unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def _snake_case ( self )-> str:
        pass

    @unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def _snake_case ( self )-> str:
        pass

    @unittest.skip(reason="""UperNet does not have a base model""" )
    def _snake_case ( self )-> Optional[Any]:
        pass

    @unittest.skip(reason="""UperNet does not have a base model""" )
    def _snake_case ( self )-> Optional[Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def _snake_case ( self )-> List[Any]:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _snake_case ( self )-> str:
        pass

    def _snake_case ( self )-> Optional[int]:
        # hidden_states must expose num_stages + 1 feature maps (stages + stem).
        def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ =self.model_tester.num_stages
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ =True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ =True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Union[str, Any]:
        # With zero-init config, every trainable parameter's mean must be 0 or 1.
        lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ =_config_zero_init(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )

    @unittest.skip(reason="""UperNet does not have tied weights""" )
    def _snake_case ( self )-> Dict:
        pass

    @slow
    def _snake_case ( self )-> Tuple:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase ( ) ->Tuple:
    """simple docstring"""
    # Downloads the ADE20k fixture image used by the integration tests.
    lowerCamelCase_ =hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
    lowerCamelCase_ =Image.open(_A ).convert("""RGB""" )
    return image


@require_torch
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    # Integration tests against pretrained Swin-tiny / ConvNext-tiny checkpoints,
    # comparing a 3x3 corner of the logits to reference values.

    def _snake_case ( self )-> List[Any]:
        lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
        lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =prepare_img()
        lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
        with torch.no_grad():
            lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )

    def _snake_case ( self )-> int:
        lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
        lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =prepare_img()
        lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
        with torch.no_grad():
            lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
75
0
def __UpperCamelCase ( _A : bytes ) ->Dict: """simple docstring""" return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] ) def __UpperCamelCase ( _A : str ) ->Optional[int]: """simple docstring""" if (len(A__ ) % 2) != 0: raise ValueError( """Base16 encoded data is invalid: Data does not have an even number of hex digits.""" ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(A__ ) <= set("""0123456789ABCDEF""" ): raise ValueError( """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""" ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(A__ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
713
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


__A : Any = logging.get_logger(__name__)


class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
    # Image processor implementing a resize -> center-crop -> rescale -> normalize
    # pipeline with ImageNet default mean/std. Each step can be toggled via the
    # corresponding do_* flag, both at construction and per preprocess() call.

    _UpperCamelCase:Union[str, Any] = ["pixel_values"]

    def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **_SCREAMING_SNAKE_CASE , )-> None:
        super().__init__(**_SCREAMING_SNAKE_CASE )
        # Defaults: shortest_edge=224 for resize, 224x224 center crop.
        lowerCamelCase_ =size if size is not None else {"""shortest_edge""": 224}
        lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
        lowerCamelCase_ =do_resize
        lowerCamelCase_ =size
        lowerCamelCase_ =resample
        lowerCamelCase_ =do_center_crop
        lowerCamelCase_ =crop_size
        lowerCamelCase_ =do_rescale
        lowerCamelCase_ =rescale_factor
        lowerCamelCase_ =do_normalize
        lowerCamelCase_ =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        lowerCamelCase_ =image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray:
        # Resize: for a "shortest_edge" spec the edge is first scaled by 256/224
        # (classic "resize to 256, crop to 224" recipe).
        lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            lowerCamelCase_ =int((256 / 224) * size["""shortest_edge"""] )
            lowerCamelCase_ =get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ ={"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
        return resize(
            _SCREAMING_SNAKE_CASE , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray:
        # Center-crop to an exact {"height", "width"}.
        lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray:
        # Multiply pixel values by the scale factor (typically 1/255).
        return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray:
        # Standardize with per-channel mean/std.
        return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , )-> BatchFeature:
        # Full preprocessing entry point: per-call arguments override the
        # instance-level defaults; input is normalized to a list of np arrays.
        lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
        lowerCamelCase_ =resample if resample is not None else self.resample
        lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
        lowerCamelCase_ =image_std if image_std is not None else self.image_std
        lowerCamelCase_ =size if size is not None else self.size
        lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
        lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
        lowerCamelCase_ =make_list_of_images(_SCREAMING_SNAKE_CASE )
        if not valid_images(_SCREAMING_SNAKE_CASE ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Validate that each enabled step has its required parameters.
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        lowerCamelCase_ =[to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
        if do_resize:
            lowerCamelCase_ =[self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
        if do_center_crop:
            lowerCamelCase_ =[self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
        if do_rescale:
            lowerCamelCase_ =[self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
        if do_normalize:
            lowerCamelCase_ =[self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
        lowerCamelCase_ =[to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
        lowerCamelCase_ ={"""pixel_values""": images}
        return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
75
0
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    # Tester: holds processor kwargs and predicts the expected output resolution
    # that BridgeTowerImageProcessor's shortest-edge + size_divisor logic yields.

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _SCREAMING_SNAKE_CASE = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=3 , )-> str:
        lowerCamelCase_ =parent
        lowerCamelCase_ =do_resize
        lowerCamelCase_ =size if size is not None else {"shortest_edge": 288}
        lowerCamelCase_ =size_divisor
        lowerCamelCase_ =do_rescale
        lowerCamelCase_ =rescale_factor
        lowerCamelCase_ =do_normalize
        lowerCamelCase_ =do_center_crop
        lowerCamelCase_ =image_mean
        lowerCamelCase_ =image_std
        lowerCamelCase_ =do_pad
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =min_resolution
        lowerCamelCase_ =max_resolution

    def _snake_case ( self )-> int:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Optional[Any]:
        # Mirrors the processor's resize math: scale shortest edge to `size`,
        # cap the longest edge at 1333/800 * size, then round both dimensions
        # down to a multiple of size_divisor. Batched mode takes the per-image
        # maxima so all images can be padded to a common shape.
        if not batched:
            lowerCamelCase_ =self.size["shortest_edge"]
            lowerCamelCase_ =image_inputs[0]
            if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
                lowerCamelCase_ =image.size
            else:
                lowerCamelCase_ =image.shape[1], image.shape[2]
            lowerCamelCase_ =size / min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            if h < w:
                lowerCamelCase_ =size, scale * w
            else:
                lowerCamelCase_ =scale * h, size
            lowerCamelCase_ =int((1333 / 800) * size )
            if max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) > max_size:
                lowerCamelCase_ =max_size / max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                lowerCamelCase_ =newh * scale
                lowerCamelCase_ =neww * scale
            lowerCamelCase_ =int(newh + 0.5 ), int(neww + 0.5 )
            lowerCamelCase_ =(
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            lowerCamelCase_ =[]
            for image in image_inputs:
                lowerCamelCase_ =self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCamelCase_ =max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
            lowerCamelCase_ =max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
        return expected_height, expected_width


@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase):
    # Exercises BridgeTowerImageProcessor with PIL, numpy, and torch inputs.

    _UpperCamelCase:Union[str, Any] = BridgeTowerImageProcessor if is_vision_available() else None

    def _snake_case ( self )-> List[Any]:
        lowerCamelCase_ =BridgeTowerImageProcessingTester(self )

    @property
    def _snake_case ( self )-> Tuple:
        return self.image_processor_tester.prepare_image_processor_dict()

    def _snake_case ( self )-> Dict:
        # The processor must expose all configured properties.
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size_divisor""" ) )

    def _snake_case ( self )-> str:
        pass

    def _snake_case ( self )-> Optional[Any]:
        # Initialize image processor
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        lowerCamelCase_ =self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCamelCase_ =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
        lowerCamelCase_ =self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _snake_case ( self )-> Optional[int]:
        # Initialize image processor
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        lowerCamelCase_ =self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCamelCase_ =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
        lowerCamelCase_ =self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _snake_case ( self )-> Union[str, Any]:
        # Initialize image processor
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        lowerCamelCase_ =self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCamelCase_ =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
        lowerCamelCase_ =self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
714
# Imports
import numpy as np


class _SCREAMING_SNAKE_CASE :
    # Computes a catalog of spectral vegetation indices (NDVI, EVI, SAVI family,
    # hue/intensity, etc.) from per-band reflectance matrices: red, green, blue,
    # red-edge and NIR. Bands are numpy arrays; all index formulas operate
    # elementwise.
    # NOTE(review): presumably reflectance values are already co-registered
    # arrays of equal shape — not validated here.

    def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any:
        self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
        # Update only the bands that were provided; returns True on success.
        if red is not None:
            lowerCamelCase_ =red
        if green is not None:
            lowerCamelCase_ =green
        if blue is not None:
            lowerCamelCase_ =blue
        if red_edge is not None:
            lowerCamelCase_ =red_edge
        if nir is not None:
            lowerCamelCase_ =nir
        return True

    def _snake_case ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
        # Dispatch table from index name to the computing method; unknown names
        # print a message and return False instead of raising.
        self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ ={
            """ARVI2""": self.arvaa,
            """CCCI""": self.ccci,
            """CVI""": self.cvi,
            """GLI""": self.gli,
            """NDVI""": self.ndvi,
            """BNDVI""": self.bndvi,
            """redEdgeNDVI""": self.red_edge_ndvi,
            """GNDVI""": self.gndvi,
            """GBNDVI""": self.gbndvi,
            """GRNDVI""": self.grndvi,
            """RBNDVI""": self.rbndvi,
            """PNDVI""": self.pndvi,
            """ATSAVI""": self.atsavi,
            """BWDRVI""": self.bwdrvi,
            """CIgreen""": self.ci_green,
            """CIrededge""": self.ci_rededge,
            """CI""": self.ci,
            """CTVI""": self.ctvi,
            """GDVI""": self.gdvi,
            """EVI""": self.evi,
            """GEMI""": self.gemi,
            """GOSAVI""": self.gosavi,
            """GSAVI""": self.gsavi,
            """Hue""": self.hue,
            """IVI""": self.ivi,
            """IPVI""": self.ipvi,
            """I""": self.i,
            """RVI""": self.rvi,
            """MRVI""": self.mrvi,
            """MSAVI""": self.m_savi,
            """NormG""": self.norm_g,
            """NormNIR""": self.norm_nir,
            """NormR""": self.norm_r,
            """NGRDI""": self.ngrdi,
            """RI""": self.ri,
            """S""": self.s,
            """IF""": self._if,
            """DVI""": self.dvi,
            """TVI""": self.tvi,
            """NDRE""": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("""Index not in the list!""" )
            return False

    def _snake_case ( self )-> Optional[Any]:
        # ARVI2
        return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))

    def _snake_case ( self )-> Tuple:
        # CCCI: red-edge NDVI ratio over classic NDVI.
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red) )

    def _snake_case ( self )-> str:
        # CVI
        return self.nir * (self.red / (self.green**2))

    def _snake_case ( self )-> Optional[int]:
        # GLI
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue )

    def _snake_case ( self )-> Tuple:
        # NDVI: normalized difference of NIR and red.
        return (self.nir - self.red) / (self.nir + self.red)

    def _snake_case ( self )-> Dict:
        # BNDVI
        return (self.nir - self.blue) / (self.nir + self.blue)

    def _snake_case ( self )-> List[Any]:
        # redEdgeNDVI
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def _snake_case ( self )-> Tuple:
        # GNDVI
        return (self.nir - self.green) / (self.nir + self.green)

    def _snake_case ( self )-> Optional[int]:
        # GBNDVI
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue) )

    def _snake_case ( self )-> List[str]:
        # GRNDVI
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red) )

    def _snake_case ( self )-> List[str]:
        # RBNDVI
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def _snake_case ( self )-> Optional[int]:
        # PNDVI
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue) )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.0_8 , _SCREAMING_SNAKE_CASE=1.2_2 , _SCREAMING_SNAKE_CASE=0.0_3 )-> Any:
        # ATSAVI with soil-line parameters a, b and adjustment x.
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2)) )

    def _snake_case ( self )-> Tuple:
        # BWDRVI
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def _snake_case ( self )-> Any:
        # CIgreen
        return (self.nir / self.green) - 1

    def _snake_case ( self )-> Union[str, Any]:
        # CIrededge
        return (self.nir / self.redEdge) - 1

    def _snake_case ( self )-> Union[str, Any]:
        # CI
        return (self.red - self.blue) / self.red

    def _snake_case ( self )-> Dict:
        # CTVI, built on top of NDVI.
        lowerCamelCase_ =self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))

    def _snake_case ( self )-> int:
        # GDVI
        return self.nir - self.green

    def _snake_case ( self )-> Dict:
        # EVI
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) )

    def _snake_case ( self )-> List[str]:
        # GEMI
        lowerCamelCase_ =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5 )
        return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.1_6 )-> List[Any]:
        # GOSAVI with adjustment y.
        return (self.nir - self.green) / (self.nir + self.green + y)

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.5 )-> Dict:
        # GSAVI with soil adjustment n.
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def _snake_case ( self )-> int:
        # Hue
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
        # IVI with soil-line parameters a, b.
        return (self.nir - b) / (a * self.red)

    def _snake_case ( self )-> int:
        # IPVI
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def _snake_case ( self )-> Optional[Any]:
        # Intensity I
        return (self.red + self.green + self.blue) / 3_0.5

    def _snake_case ( self )-> List[str]:
        # RVI
        return self.nir / self.red

    def _snake_case ( self )-> List[str]:
        # MRVI
        return (self.rvi() - 1) / (self.rvi() + 1)

    def _snake_case ( self )-> str:
        # MSAVI
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2

    def _snake_case ( self )-> List[Any]:
        # NormG
        return self.green / (self.nir + self.red + self.green)

    def _snake_case ( self )-> Dict:
        # NormNIR
        return self.nir / (self.nir + self.red + self.green)

    def _snake_case ( self )-> List[str]:
        # NormR
        return self.red / (self.nir + self.red + self.green)

    def _snake_case ( self )-> int:
        # NGRDI
        return (self.green - self.red) / (self.green + self.red)

    def _snake_case ( self )-> str:
        # RI
        return (self.red - self.green) / (self.red + self.green)

    def _snake_case ( self )-> str:
        # Saturation S from RGB max/min.
        lowerCamelCase_ =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        lowerCamelCase_ =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value

    def _snake_case ( self )-> List[str]:
        # IF
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def _snake_case ( self )-> List[Any]:
        # DVI
        return self.nir / self.red

    def _snake_case ( self )-> Optional[int]:
        # TVI
        return (self.ndvi() + 0.5) ** (1 / 2)

    def _snake_case ( self )-> str:
        # NDRE
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
0