code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : int = logging.get_logger(__name__)
a : Optional[Any] = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """deta"""
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Any=900 , a_ : Union[str, Any]=2_048 , a_ : Optional[int]=6 , a_ : str=2_048 , a_ : Any=8 , a_ : List[str]=6 , a_ : Optional[Any]=1_024 , a_ : Union[str, Any]=8 , a_ : str=0.0 , a_ : Tuple=True , a_ : Optional[int]="relu" , a_ : str=256 , a_ : int=0.1 , a_ : int=0.0 , a_ : Any=0.0 , a_ : List[str]=0.02 , a_ : List[str]=1.0 , a_ : Union[str, Any]=True , a_ : List[Any]=False , a_ : Any="sine" , a_ : List[Any]=5 , a_ : int=4 , a_ : str=4 , a_ : Any=True , a_ : Union[str, Any]=300 , a_ : List[Any]=True , a_ : Any=True , a_ : Dict=1 , a_ : Tuple=5 , a_ : Union[str, Any]=2 , a_ : List[Any]=1 , a_ : Optional[Any]=1 , a_ : Union[str, Any]=5 , a_ : int=2 , a_ : Dict=0.1 , a_ : Union[str, Any]=0.25 , **a_ : int , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__snake_case = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(a_ , a_ ):
__snake_case = backbone_config.pop("model_type" )
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(a_ )
__snake_case = backbone_config
__snake_case = num_queries
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
# deformable attributes
__snake_case = num_feature_levels
__snake_case = encoder_n_points
__snake_case = decoder_n_points
__snake_case = two_stage
__snake_case = two_stage_num_proposals
__snake_case = with_box_refine
__snake_case = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
__snake_case = focal_alpha
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def A ( self : List[str] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A ( self : int ):
"""simple docstring"""
return self.d_model
def A ( self : int ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.backbone_config.to_dict()
__snake_case = self.__class__.model_type
return output
| 69 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __snake_case (_a ):
lowerCAmelCase__ = "naver-clova-ix/donut-base-finetuned-docvqa"
lowerCAmelCase__ = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowerCAmelCase__ = "document_qa"
lowerCAmelCase__ = AutoProcessor
lowerCAmelCase__ = VisionEncoderDecoderModel
lowerCAmelCase__ = ["image", "text"]
lowerCAmelCase__ = ["text"]
def __init__( self : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : "Image" , _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , _UpperCAmelCase )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_UpperCAmelCase , ).sequences
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Dict = self.pre_processor.batch_decode(_UpperCAmelCase )[0]
_lowerCAmelCase : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[Any] = re.sub(R"""<.*?>""" , """""" , _UpperCAmelCase , count=1 ).strip() # remove first task start token
_lowerCAmelCase : Tuple = self.pre_processor.tokenajson(_UpperCAmelCase )
return sequence["answer"]
| 429 | 0 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : List[Any]=None ) -> Tuple:
A_ = None
if token is not None:
A_ = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
A_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
A_ = requests.get(lowerCamelCase_, headers=lowerCamelCase_ ).json()
A_ = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
A_ = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(lowerCamelCase_ ):
A_ = requests.get(url + F'''&page={i + 2}''', headers=lowerCamelCase_ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : int=None ) -> Any:
A_ = None
if token is not None:
A_ = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
A_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
A_ = requests.get(lowerCamelCase_, headers=lowerCamelCase_ ).json()
A_ = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
A_ = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(lowerCamelCase_ ):
A_ = requests.get(url + F'''&page={i + 2}''', headers=lowerCamelCase_ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Union[str, Any], _UpperCamelCase : Optional[Any], _UpperCamelCase : Union[str, Any] ) -> Tuple:
A_ = None
if token is not None:
A_ = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
A_ = requests.get(lowerCamelCase_, headers=lowerCamelCase_, allow_redirects=lowerCamelCase_ )
A_ = result.headers['Location']
A_ = requests.get(lowerCamelCase_, allow_redirects=lowerCamelCase_ )
A_ = os.path.join(lowerCamelCase_, F'''{artifact_name}.zip''' )
with open(lowerCamelCase_, '''wb''' ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : List[str]=None ) -> List[str]:
A_ = []
A_ = []
A_ = None
with zipfile.ZipFile(lowerCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCamelCase_ ) as f:
for line in f:
A_ = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
A_ = line[: line.index(''': ''' )]
A_ = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
A_ = line[len('''FAILED ''' ) :]
failed_tests.append(lowerCamelCase_ )
elif filename == "job_name.txt":
A_ = line
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCamelCase_ )} for `errors` '''
F'''and {len(lowerCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
''' problem.''' )
A_ = None
if job_name and job_links:
A_ = job_links.get(lowerCamelCase_, lowerCamelCase_ )
# A list with elements of the form (line of error, error, failed test)
A_ = [x + [y] + [job_link] for x, y in zip(lowerCamelCase_, lowerCamelCase_ )]
return result
def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Optional[int]=None ) -> Any:
A_ = []
A_ = [os.path.join(lowerCamelCase_, lowerCamelCase_ ) for p in os.listdir(lowerCamelCase_ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCamelCase_, job_links=lowerCamelCase_ ) )
return errors
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : Dict=None ) -> Any:
A_ = Counter()
counter.update([x[1] for x in logs] )
A_ = counter.most_common()
A_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
A_ = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
A_ = dict(sorted(r.items(), key=lambda _UpperCamelCase : item[1]["count"], reverse=lowerCamelCase_ ) )
return r
def _UpperCAmelCase ( _UpperCamelCase : Optional[int] ) -> int:
A_ = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
A_ = test.split('''/''' )[2]
else:
A_ = None
return test
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : Dict=None ) -> int:
A_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
A_ = [x for x in logs if x[2] is not None]
A_ = {x[2] for x in logs}
A_ = {}
for test in tests:
A_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
A_ = counter.most_common()
A_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
A_ = sum(error_counts.values() )
if n_errors > 0:
A_ = {'count': n_errors, 'errors': error_counts}
A_ = dict(sorted(r.items(), key=lambda _UpperCamelCase : item[1]["count"], reverse=lowerCamelCase_ ) )
return r
def _UpperCAmelCase ( _UpperCamelCase : Optional[int] ) -> str:
A_ = '| no. | error | status |'
A_ = '|-:|:-|:-|'
A_ = [header, sep]
for error in reduced_by_error:
A_ = reduced_by_error[error]['count']
A_ = F'''| {count} | {error[:1_00]} | |'''
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Any:
A_ = '| model | no. of errors | major error | count |'
A_ = '|-:|-:|-:|-:|'
A_ = [header, sep]
for model in reduced_by_model:
A_ = reduced_by_model[model]['count']
A_ = list(reduced_by_model[model]['''errors'''].items() )[0]
A_ = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__snake_case : str = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__snake_case : str = get_job_links(args.workflow_run_id, token=args.token)
__snake_case : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__snake_case : List[Any] = k.find(' / ')
__snake_case : Optional[int] = k[index + len(' / ') :]
__snake_case : int = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__snake_case : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__snake_case : Union[str, Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__snake_case : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__snake_case : List[Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__snake_case : Dict = reduce_by_error(errors)
__snake_case : int = reduce_by_model(errors)
__snake_case : Optional[int] = make_github_table(reduced_by_error)
__snake_case : Dict = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 706 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__snake_case : str = logging.get_logger(__name__)
@add_end_docstrings(
_UpperCamelCase , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
if self.framework == "tf":
A_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __A ( self , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
A_ = self.get_masked_index(_SCREAMING_SNAKE_CASE )
A_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> int:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
if return_tensors is None:
A_ = self.framework
A_ = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(_SCREAMING_SNAKE_CASE )
return model_inputs
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
A_ = self.model(**_SCREAMING_SNAKE_CASE )
A_ = model_inputs['''input_ids''']
return model_outputs
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
A_ = target_ids.shape[0]
A_ = model_outputs['''input_ids'''][0]
A_ = model_outputs['''logits''']
if self.framework == "tf":
A_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A_ = outputs.numpy()
A_ = outputs[0, masked_index, :]
A_ = stable_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
A_ = tf.gather_nd(tf.squeeze(_SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
A_ = tf.expand_dims(_SCREAMING_SNAKE_CASE , 0 )
A_ = tf.math.top_k(_SCREAMING_SNAKE_CASE , k=_SCREAMING_SNAKE_CASE )
A_ ,A_ = topk.values.numpy(), topk.indices.numpy()
else:
A_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A_ = outputs[0, masked_index, :]
A_ = logits.softmax(dim=-1 )
if target_ids is not None:
A_ = probs[..., target_ids]
A_ ,A_ = probs.topk(_SCREAMING_SNAKE_CASE )
A_ = []
A_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A_ = input_ids.numpy().copy()
if target_ids is not None:
A_ = target_ids[p].tolist()
A_ = p
# Filter padding out:
A_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A_ = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_SCREAMING_SNAKE_CASE )
result.append(_SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = [targets]
try:
A_ = self.tokenizer.get_vocab()
except Exception:
A_ = {}
A_ = []
for target in targets:
A_ = vocab.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if id_ is None:
A_ = self.tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , max_length=1 , truncation=_SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(_SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
A_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
A_ = list(set(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
A_ = np.array(_SCREAMING_SNAKE_CASE )
return target_ids
def __A ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Dict:
A_ = {}
if targets is not None:
A_ = self.get_target_ids(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = target_ids
if top_k is not None:
A_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
| 174 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( A_ = 1_000_000 ):
UpperCAmelCase_ = set(range(3 , A_ , 2 ) )
primes.add(2 )
for p in range(3 , A_ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A_ , A_ ) ) )
UpperCAmelCase_ = [float(A_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A_ , limit + 1 , A_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 660 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowercase_ ( _A ):
a_ = """linear"""
a_ = """cosine"""
a_ = """cosine_with_restarts"""
a_ = """polynomial"""
a_ = """constant"""
a_ = """constant_with_warmup"""
a_ = """piecewise_constant"""
def lowerCamelCase__ ( A_ , A_ = -1 ):
return LambdaLR(A_ , lambda A_ : 1 , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1.0 , A_ ) )
return 1.0
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ , UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(A_ )
UpperCAmelCase_ = float(A_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(A_ , A_ ):
def rule_func(A_ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(A_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(A_ , A_ )
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
| 660 | 1 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case : Union[str, Any] = Lock()
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(SCREAMING_SNAKE_CASE_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
a__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
a__ = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(SCREAMING_SNAKE_CASE_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
a__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
a__ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(SCREAMING_SNAKE_CASE_ )
def __lowercase ( __lowerCAmelCase : List[Any] ):
a__ = []
a__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
a__ = Pipe()
a__ = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
a__ = temp_rs
a__ = temp_rr
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
a__ = Pipe()
a__ = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
a__ = temp_rs
a__ = temp_rr
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE_ , args=(
len(SCREAMING_SNAKE_CASE_ ) - 1,
arr[len(SCREAMING_SNAKE_CASE_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(SCREAMING_SNAKE_CASE_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
a__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __lowercase ( ):
a__ = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*SCREAMING_SNAKE_CASE_ )
a__ = odd_even_transposition(SCREAMING_SNAKE_CASE_ )
print('Sorted List\n' )
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
    """Test helper that builds tiny CTRL configs/inputs and runs per-model checks
    (appears to be transformers' `CTRLModelTester` — TODO confirm against the
    upstream test file).

    NOTE(review): obfuscation residue — every parameter of every method below is
    named `__snake_case`; duplicate argument names are a SyntaxError in Python,
    so this class cannot be imported as written. The five checker methods also
    all share the name `lowerCamelCase__`, so even after a signature fix only
    the last def would survive on the class. Assignment targets were collapsed
    to `a__`, losing the attribute names that later code reads (`self.parent`,
    `self.batch_size`, ...).
    """
    def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
        # The right-hand sides preserve the intended attribute names; the
        # targets were destroyed by the renamer.
        a__ = parent
        a__ = batch_size
        a__ = seq_length
        a__ = is_training
        a__ = use_token_type_ids
        a__ = use_input_mask
        a__ = use_labels
        a__ = use_mc_token_ids
        a__ = vocab_size
        a__ = hidden_size
        a__ = num_hidden_layers
        a__ = num_attention_heads
        a__ = intermediate_size
        a__ = hidden_act
        a__ = hidden_dropout_prob
        a__ = attention_probs_dropout_prob
        a__ = max_position_embeddings
        a__ = type_vocab_size
        a__ = type_sequence_label_size
        a__ = initializer_range
        a__ = num_labels
        a__ = num_choices
        a__ = scope
        a__ = self.vocab_size - 1
    def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
        """Build random ids/masks/labels plus a config; returns the 9-tuple used
        by the checkers below."""
        a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        a__ = None
        if self.use_input_mask:
            a__ = random_attention_mask([self.batch_size, self.seq_length] )
        a__ = None
        if self.use_token_type_ids:
            a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        a__ = None
        if self.use_mc_token_ids:
            a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
        a__ = None
        a__ = None
        a__ = None
        if self.use_labels:
            a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            a__ = ids_tensor([self.batch_size] ,self.num_choices )
        a__ = self.get_config()
        a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
        """Return a small CTRLConfig built from the tester's hyper-parameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
    def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
        """Check CTRLModel output shapes and past_key_values length."""
        a__ = CTRLModel(config=__snake_case )
        model.to(__snake_case )
        model.eval()
        model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
        model(__snake_case ,token_type_ids=__snake_case )
        a__ = model(__snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
    def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
        """Check CTRLLMHeadModel loss/logits shapes."""
        a__ = CTRLLMHeadModel(__snake_case )
        model.to(__snake_case )
        model.eval()
        a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
        self.parent.assertEqual(result.loss.shape ,() )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
        """Prepare (config, inputs_dict) for the common-test mixin."""
        a__ = self.prepare_config_and_inputs()
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) = config_and_inputs
        a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
        """Check CTRLForSequenceClassification logits shape."""
        a__ = self.num_labels
        a__ = CTRLForSequenceClassification(__snake_case )
        model.to(__snake_case )
        model.eval()
        a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    """Common + pipeline test suite for the CTRL model family.

    NOTE(review): obfuscation residue — the three mixin base classes were all
    renamed to the undefined `lowerCamelCase_` (NameError at class creation;
    presumably ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin —
    TODO confirm), the class-attribute targets were collapsed to
    `UpperCAmelCase__` (losing `all_model_classes` etc.), the pipeline-skip
    method has duplicate `__snake_case` parameters (SyntaxError), and several
    test methods share the name `lowerCamelCase__`.
    """
    UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
    UpperCAmelCase__ : Any = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__ : Tuple = True
    UpperCAmelCase__ : List[Any] = False
    UpperCAmelCase__ : List[str] = False
    def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
        # Returns True when a pipeline test case should be skipped.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def lowerCamelCase__( self :int ) -> List[str]:
        # setUp: instantiate the model tester and the config tester.
        a__ = CTRLModelTester(self )
        a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
    def lowerCamelCase__( self :str ) -> str:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase__( self :Tuple ) -> List[Any]:
        self.config_tester.run_common_tests()
    def lowerCamelCase__( self :str ) -> str:
        a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*__snake_case )
    def lowerCamelCase__( self :List[Any] ) -> Any:
        a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*__snake_case )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
        pass
    @slow
    def lowerCamelCase__( self :int ) -> List[Any]:
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ = CTRLModel.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
    @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
    def lowerCamelCase__( self :Dict ) -> List[str]:
        pass
@require_torch
class snake_case_ (unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained 'ctrl'
    checkpoint must reproduce a fixed token sequence.

    NOTE(review): obfuscation residue — both methods share the name
    `lowerCamelCase__` (only the last def survives), and the generation test
    reads `__snake_case` / `output_ids`, neither of which is defined (the
    model/inputs were bound to `a__`).
    """
    def lowerCamelCase__( self :Union[str, Any] ) -> str:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def lowerCamelCase__( self :Any ) -> Dict:
        a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(__snake_case )
        a__ = torch.tensor(
            [[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
        a__ = [
            1_18_59,
            0,
            16_11,
            8,
            5,
            1_50,
            2_64_49,
            2,
            19,
            3_48,
            4_69,
            3,
            25_95,
            48,
            2_07_40,
            24_65_33,
            24_65_33,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        a__ = model.generate(__snake_case ,do_sample=__snake_case )
        self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
    """Fixture holder for Donut image-processor tests: stores the processing
    hyper-parameters and exposes them as a kwargs dict.

    NOTE(review): obfuscation residue — every `__init__` parameter is named `A`
    (duplicate argument names are a SyntaxError), the assignment targets were
    collapsed to `UpperCAmelCase__` (losing `self.parent`, `self.size`, ...),
    and the dict method below is named `__lowercase` although callers invoke
    `prepare_image_processor_dict()`. Mutable list defaults for the mean/std
    parameters are also an anti-pattern in the original.
    """
    def __init__( self : Union[str, Any] ,A : Union[str, Any] ,A : Dict=7 ,A : Optional[int]=3 ,A : List[str]=18 ,A : Union[str, Any]=30 ,A : Tuple=400 ,A : Dict=True ,A : List[str]=None ,A : str=True ,A : Optional[Any]=False ,A : Optional[Any]=True ,A : List[str]=True ,A : Optional[int]=[0.5, 0.5, 0.5] ,A : List[str]=[0.5, 0.5, 0.5] ,):
        """Record the tester's image-processing hyper-parameters."""
        UpperCAmelCase__ : str = parent
        UpperCAmelCase__ : List[str] = batch_size
        UpperCAmelCase__ : List[str] = num_channels
        UpperCAmelCase__ : Union[str, Any] = image_size
        UpperCAmelCase__ : List[Any] = min_resolution
        UpperCAmelCase__ : Optional[int] = max_resolution
        UpperCAmelCase__ : str = do_resize
        UpperCAmelCase__ : Tuple = size if size is not None else {"""height""": 18, """width""": 20}
        UpperCAmelCase__ : List[str] = do_thumbnail
        UpperCAmelCase__ : Optional[int] = do_align_axis
        UpperCAmelCase__ : Union[str, Any] = do_pad
        UpperCAmelCase__ : Tuple = do_normalize
        UpperCAmelCase__ : Optional[Any] = image_mean
        UpperCAmelCase__ : List[Any] = image_std
    def __lowercase ( self : Optional[int] ):
        """Return the stored hyper-parameters as a DonutImageProcessor kwargs dict."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    """Donut image-processor test suite: property checks, size-dict parsing, and
    batched/unbatched encoding for PIL, numpy and torch inputs.

    NOTE(review): obfuscation residue — every method is named `__lowercase`, so
    at class-creation time only the LAST def (the torch-tensor test) survives;
    the mixin base was renamed to `__lowerCamelCase` (presumably
    ImageProcessingSavingTestMixin — TODO confirm); bodies pass the undefined
    name `A` to assertions; and the tester is bound to `UpperCAmelCase__`
    rather than `self.image_processor_tester`, which later methods read.
    """
    snake_case_ = DonutImageProcessor if is_vision_available() else None
    def __lowercase ( self : str ):
        """setUp: build the hyper-parameter fixture."""
        UpperCAmelCase__ : Tuple = DonutImageProcessingTester(self )
    @property
    def __lowercase ( self : Dict ):
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __lowercase ( self : Any ):
        """The processor must expose all configured attributes."""
        UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A ,"""do_resize""" ) )
        self.assertTrue(hasattr(A ,"""size""" ) )
        self.assertTrue(hasattr(A ,"""do_thumbnail""" ) )
        self.assertTrue(hasattr(A ,"""do_align_long_axis""" ) )
        self.assertTrue(hasattr(A ,"""do_pad""" ) )
        self.assertTrue(hasattr(A ,"""do_normalize""" ) )
        self.assertTrue(hasattr(A ,"""image_mean""" ) )
        self.assertTrue(hasattr(A ,"""image_std""" ) )
    def __lowercase ( self : Optional[Any] ):
        """`size` accepts an int (square) or a (width, height) tuple."""
        UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 20} )
        UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
        # Previous config had dimensions in (width, height) order
        UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) )
        self.assertEqual(image_processor.size ,{"""height""": 84, """width""": 42} )
    def __lowercase ( self : Dict ):
        """Intentionally empty placeholder."""
        pass
    @is_flaky()
    def __lowercase ( self : int ):
        """Encoding PIL images: unbatched and batched pixel_values shapes."""
        # Initialize image_processing
        UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A ,Image.Image )
        # Test not batched input
        UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase__ : Tuple = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
    @is_flaky()
    def __lowercase ( self : List[str] ):
        """Encoding numpy arrays: unbatched and batched pixel_values shapes."""
        # Initialize image_processing
        UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,np.ndarray )
        # Test not batched input
        UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase__ : Optional[int] = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
    @is_flaky()
    def __lowercase ( self : Any ):
        """Encoding torch tensors: unbatched and batched pixel_values shapes."""
        # Initialize image_processing
        UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )
        # Test not batched input
        UpperCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase__ : List[Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
| 65 |
def _lowerCAmelCase ( __magic_name__ :list ):
if any(not isinstance(__magic_name__ , __magic_name__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(__magic_name__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__magic_name__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
    # Self-test of the bead sort above.
    # Fixed NameError: the sorter in this module is bound to `_lowerCAmelCase`,
    # not `bead_sort`.
    assert _lowerCAmelCase([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert _lowerCAmelCase([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 121 | 0 |
"""simple docstring"""
lowerCamelCase__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
"""simple docstring"""
class A__ :
def __init__( self ):
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Any = 0
__lowerCAmelCase : List[Any] = {}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if vertex not in self.adjacency:
__lowerCAmelCase : Dict = {}
self.num_vertices += 1
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.add_vertex(_SCREAMING_SNAKE_CASE )
self.add_vertex(_SCREAMING_SNAKE_CASE )
if head == tail:
return
__lowerCAmelCase : Union[str, Any] = weight
__lowerCAmelCase : str = weight
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.get_edges()
for edge in edges:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = edge
edges.remove((tail, head, weight) )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase : List[str] = list(edges[i] )
edges.sort(key=lambda _SCREAMING_SNAKE_CASE : e[2] )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__lowerCAmelCase : Dict = edges[i][2] + 1
for edge in edges:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = edge
__lowerCAmelCase : Union[str, Any] = weight
__lowerCAmelCase : Optional[int] = weight
def __str__( self ):
__lowerCAmelCase : List[Any] = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
__lowerCAmelCase : str = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('\n' )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCamelCase ( self ):
return self.adjacency.keys()
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowerCAmelCase : List[str] = Graph()
if vertices is None:
__lowerCAmelCase : int = []
if edges is None:
__lowerCAmelCase : Optional[int] = []
for vertex in vertices:
g.add_vertex(_SCREAMING_SNAKE_CASE )
for edge in edges:
g.add_edge(*_SCREAMING_SNAKE_CASE )
return g
class A__ :
    """Union-Find (disjoint-set with union by rank and path compression) plus a
    static Borůvka minimum-spanning-tree routine.

    NOTE(review): obfuscation residue — this second ``class A__`` shadows the
    Graph class above (which the static method still references as
    ``Graph.UnionFind`` / ``Graph.build`` — both undefined here); one method
    declares duplicate ``_SCREAMING_SNAKE_CASE`` parameters (SyntaxError); and
    assignment targets were collapsed to ``__lowerCAmelCase``.
    """
    def __init__( self ):
        # parent and rank maps — intended targets destroyed by the renamer.
        __lowerCAmelCase : Dict = {}
        __lowerCAmelCase : Optional[int] = {}
    def __len__( self ):
        return len(self.parent )
    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        """make_set: register a new item as its own root with rank 0."""
        if item in self.parent:
            return self.find(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = item
        __lowerCAmelCase : int = 0
        return item
    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        """find with path compression: parent pointers are rewritten to the root."""
        if item not in self.parent:
            return self.make_set(_SCREAMING_SNAKE_CASE )
        if item != self.parent[item]:
            __lowerCAmelCase : Dict = self.find(self.parent[item] )
        return self.parent[item]
    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        """union by rank: attach the lower-rank root under the higher-rank one."""
        __lowerCAmelCase : Any = self.find(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = self.find(_SCREAMING_SNAKE_CASE )
        if roota == roota:
            return roota
        if self.rank[roota] > self.rank[roota]:
            __lowerCAmelCase : Tuple = roota
            return roota
        if self.rank[roota] < self.rank[roota]:
            __lowerCAmelCase : str = roota
            return roota
        if self.rank[roota] == self.rank[roota]:
            self.rank[roota] += 1
            __lowerCAmelCase : Optional[int] = roota
            return roota
        return None
    @staticmethod
    def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
        """Borůvka's MST: repeatedly add each component's cheapest outgoing edge
        until one component remains, then rebuild a graph from the MST edges."""
        __lowerCAmelCase : Union[str, Any] = graph.num_vertices
        __lowerCAmelCase : List[Any] = Graph.UnionFind()
        __lowerCAmelCase : Tuple = []
        while num_components > 1:
            __lowerCAmelCase : str = {}
            for vertex in graph.get_vertices():
                # -1 marks "no candidate edge found yet" for this component.
                __lowerCAmelCase : Union[str, Any] = -1
            __lowerCAmelCase : int = graph.get_edges()
            for edge in edges:
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = edge
                __lowerCAmelCase : Optional[int] = union_find.find(_SCREAMING_SNAKE_CASE )
                __lowerCAmelCase : str = union_find.find(_SCREAMING_SNAKE_CASE )
                if seta != seta:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        __lowerCAmelCase : List[str] = [head, tail, weight]
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        __lowerCAmelCase : List[str] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = cheap_edge[vertex]
                    if union_find.find(_SCREAMING_SNAKE_CASE ) != union_find.find(_SCREAMING_SNAKE_CASE ):
                        union_find.union(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        mst_edges.append(cheap_edge[vertex] )
            __lowerCAmelCase : List[str] = num_components - 1
        __lowerCAmelCase : Union[str, Any] = Graph.build(edges=_SCREAMING_SNAKE_CASE )
        return mst
def a_ ( lowerCAmelCase_ : list[list] ):
    """Reduce an augmented coefficient matrix one elimination step toward
    row-echelon form, recursing on the trailing sub-matrix.

    Each row is first divided by its leading coefficient (rows with a zero
    leading coefficient are kept as-is), then the first row is subtracted from
    every other row to cancel the leading term. When rows are longer than 3
    entries, the same procedure recurses on the sub-matrix without the first
    row and column.

    :param lowerCAmelCase_: list of equation rows (coefficients + constant)
    :return: the reduced set of rows (floats after division)

    Fixed: obfuscation had collapsed every assignment target to a throwaway
    name, leaving ``current_set``, ``magnitude``, ``first_row``, ``final_set``
    and the recursive call (``simplify``) undefined — the function raised
    NameError on any input. This restores the intended algorithm with the
    recursion pointing at this function itself.

    Note: the copy is shallow (as in the original structure), so the caller's
    row lists are normalised in place.
    """
    duplicate_set = lowerCAmelCase_.copy()
    # Divide each row by the magnitude of its first term -> unit leading column.
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract the first row to cancel the leading term of every other row.
    first_row = duplicate_set[0]
    final_set = [first_row]
    for row in duplicate_set[1:]:
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        temp_row = []
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set (strip first row and column).
    if len(final_set[0]) != 3:
        removed_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1:]:
            current_first_column.append(row[0])
            next_iteration.append(row[1:])
        resultant = a_(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, removed_row)
        final_set = resultant
    return final_set
def _simplify_rows ( current_set : list[list] ):
    """Private helper: reduce an augmented matrix toward row-echelon form, one
    elimination level per recursion (divide rows by their leading term, subtract
    the first row, recurse on the trailing sub-matrix).

    Defined locally because the original body called the undefined name
    ``simplify`` — the obfuscated sibling function is not reachable under that
    name in this module.
    """
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    first_row = duplicate_set[0]
    final_set = [first_row]
    for row in duplicate_set[1:]:
        if row[0] == 0:
            final_set.append(row)
            continue
        temp_row = [first_row[column_index] - row[column_index] for column_index in range(len(row))]
        final_set.append(temp_row)
    if len(final_set[0]) != 3:
        removed_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1:]:
            current_first_column.append(row[0])
            next_iteration.append(row[1:])
        resultant = _simplify_rows(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, removed_row)
        final_set = resultant
    return final_set
def a_ ( lowerCAmelCase_ : list[list] ):
    """Solve n simultaneous linear equations given as n rows of n coefficients
    followed by the constant term; returns the solutions (floats rounded to 5
    decimal places) in variable order.

    :param lowerCAmelCase_: n rows, each of length n + 1, of ints/floats
    :raises IndexError: wrong row count/length, or empty input
    :raises ValueError: non-numeric entries, or no row free of zeros when one
        is required to lead the elimination
    :return: list of solution floats

    Fixed: the obfuscation destroyed every assignment target (``_length``,
    ``data_set``, ``full_row``, ``simplified``, ``solutions``, ``temp_row``,
    ``final`` were all undefined) and the call to ``simplify``; this restores
    the intended algorithm. Also hardened the leading-zero strip so an all-zero
    row appends 0 instead of raising IndexError.
    """
    if len(lowerCAmelCase_) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(lowerCAmelCase_) + 1
    if any(len(item) != _length for item in lowerCAmelCase_):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in lowerCAmelCase_:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(lowerCAmelCase_) == 1:
        # Single equation a*x = b -> x = b / a.
        return [lowerCAmelCase_[0][-1] / lowerCAmelCase_[0][0]]
    data_set = lowerCAmelCase_.copy()
    if any(0 in row for row in data_set):
        # Move a zero-free row to the front so elimination can always divide.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    simplified = _simplify_rows(data_set.copy())
    simplified = simplified[::-1]
    solutions = []
    # Back-substitute from the bottom row upward.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1]
        # Strip leading zeros (guarded: an all-zero row contributes solution 0).
        while temp_row and temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        # Drop the leading-1 coefficient; reverse to match solutions order.
        temp_row = temp_row[1:]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : List[Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 53 |
import collections
import os
import re
from pathlib import Path
# Regexes used by the init-consistency checker below.
# NOTE(review): obfuscation residue — every constant here was renamed to the
# SAME name `lowerCamelCase_`, so each binding shadows the previous one and
# the intended names the functions below read (`_re_backend`,
# `_re_test_backend`, `_re_import`, ...) are all undefined. TODO restore the
# original per-pattern names.
lowerCamelCase_ : Optional[Any] = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase_ : Union[str, Any] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ : int = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ : Union[str, Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase_ : Any = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ : Any = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ : List[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ : Any = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ : Tuple = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ : Tuple = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase_ : Dict = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase_ : Union[str, Any] = re.compile(r"""^\s*else:""")
def A__ ( lowerCamelCase ) -> List[Any]:
    """Return the normalized backend key ("x_and_y") for an
    `if not is_x_available()` line, or None when the line declares no backend.

    NOTE(review): obfuscation residue — `_re_test_backend` and `_re_backend`
    are undefined (see the shadowed constants above), `backends` is read but
    the list was assigned to `UpperCamelCase_`, and the final join is applied
    to the input line instead of the collected backend names.
    """
    if _re_test_backend.search(lowerCamelCase ) is None:
        return None
    UpperCamelCase_: Any = [b[0] for b in _re_backend.findall(lowerCamelCase )]
    backends.sort()
    return "_and_".join(lowerCamelCase )
def A__ ( lowerCamelCase ) -> Union[str, Any]:
    """Parse a transformers-style `__init__.py`: collect the objects declared in
    `_import_structure` and those imported under `TYPE_CHECKING`, grouped per
    backend, returning (import_dict_objects, type_hint_objects) — or None for a
    traditional init without an `_import_structure`.

    NOTE(review): obfuscation residue — every local assignment target was
    renamed to `UpperCamelCase_`, so the names read throughout the body
    (`lines`, `line_index`, `objects`, `line`, `imports`,
    `single_line_import_search`, `backend`, `import_dict_objects`,
    `type_hint_objects`) are undefined, as are the helper `find_backend` and
    the regex constants (shadowed above). The control flow below documents the
    intended algorithm; it cannot run as written.
    """
    with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        UpperCamelCase_: Dict = f.readlines()
    UpperCamelCase_: Tuple = 0
    # Skip to the `_import_structure = {` declaration.
    while line_index < len(lowerCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lowerCamelCase ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCamelCase_: Optional[int] = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        UpperCamelCase_: Any = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(lowerCamelCase ):
            UpperCamelCase_: str = _re_one_line_import_struct.search(lowerCamelCase ).groups()[0]
            UpperCamelCase_: Tuple = re.findall(r"""\[([^\]]+)\]""" , lowerCamelCase )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        UpperCamelCase_: Any = _re_import_struct_key_value.search(lowerCamelCase )
        if single_line_import_search is not None:
            UpperCamelCase_: Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase ) > 0]
            objects.extend(lowerCamelCase )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1
    UpperCamelCase_: Optional[Any] = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCamelCase_: Any = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase_: Any = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase_: int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                UpperCamelCase_: Tuple = lines[line_index]
                if _re_import_struct_add_one.search(lowerCamelCase ) is not None:
                    objects.append(_re_import_struct_add_one.search(lowerCamelCase ).groups()[0] )
                elif _re_import_struct_add_many.search(lowerCamelCase ) is not None:
                    UpperCamelCase_: List[str] = _re_import_struct_add_many.search(lowerCamelCase ).groups()[0].split(""", """ )
                    UpperCamelCase_: str = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
                    objects.extend(lowerCamelCase )
                elif _re_between_brackets.search(lowerCamelCase ) is not None:
                    UpperCamelCase_: Tuple = _re_between_brackets.search(lowerCamelCase ).groups()[0].split(""", """ )
                    UpperCamelCase_: List[Any] = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
                    objects.extend(lowerCamelCase )
                elif _re_quote_object.search(lowerCamelCase ) is not None:
                    objects.append(_re_quote_object.search(lowerCamelCase ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1
            UpperCamelCase_: List[str] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCamelCase_: List[str] = []
    while (
        line_index < len(lowerCamelCase )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        UpperCamelCase_: List[str] = lines[line_index]
        UpperCamelCase_: Union[str, Any] = _re_import.search(lowerCamelCase )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCamelCase_: Any = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lowerCamelCase ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCamelCase_: str = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase_: List[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase_: int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                UpperCamelCase_: Tuple = lines[line_index]
                UpperCamelCase_: Union[str, Any] = _re_import.search(lowerCamelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            UpperCamelCase_: Optional[Any] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def A__ ( import_dict_objects , type_hint_objects ) -> List[Any]:
    """Compare the objects declared in an init's `_import_structure` against
    those imported under `TYPE_CHECKING` and return human-readable error
    strings (empty list when consistent).

    :param import_dict_objects: dict backend-key -> list of object names from
        `_import_structure`
    :param type_hint_objects: dict backend-key -> list of object names from the
        `TYPE_CHECKING` branch
    :return: list of error message strings

    Fixed: the original declared BOTH parameters with the same name
    (`lowerCamelCase`), which is a SyntaxError in Python — the function could
    never be defined. Parameters are positional at the call site, so renaming
    them is backward-compatible.
    """
    def find_duplicates(seq):
        # Names listed more than once within the same backend bucket.
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    # Backend buckets must match exactly (including order of declaration).
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        # Report the symmetric difference between the two sides of the init.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = """base imports""" if key == """none""" else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
def A__ ( ) -> Tuple:
    """Walk the source tree, parse every `__init__.py`, and raise ValueError
    aggregating any `_import_structure` / TYPE_CHECKING inconsistencies.

    NOTE(review): obfuscation residue — the function takes no arguments but the
    body reads the undefined global `lowerCamelCase` everywhere (the walk root,
    the parse result, the error list), `failures`/`objects`/`fname`/`errors`
    are read but their assignments were renamed to `UpperCamelCase_`, and the
    helpers `parse_init`/`analyze_results` do not exist under those names in
    this module (both were renamed to `A__`).
    """
    UpperCamelCase_: Optional[int] = []
    for root, _, files in os.walk(lowerCamelCase ):
        if "__init__.py" in files:
            UpperCamelCase_: Union[str, Any] = os.path.join(lowerCamelCase , """__init__.py""" )
            UpperCamelCase_: Optional[int] = parse_init(lowerCamelCase )
            if objects is not None:
                UpperCamelCase_: Any = analyze_results(*lowerCamelCase )
                if len(lowerCamelCase ) > 0:
                    UpperCamelCase_: Any = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(lowerCamelCase ) )
    if len(lowerCamelCase ) > 0:
        raise ValueError("""\n\n""".join(lowerCamelCase ) )
def get_transformers_submodules():
    """Return all submodule dotted names found under the transformers source tree.

    NOTE(review): assumes PATH_TO_TRANSFORMERS is defined earlier in this file — confirm.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , """.""" )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            # Only top-level single-file modules count here; nested files belong to a folder module.
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(submodule )
    return submodules
# Submodules deliberately exempt from registration in the main `_import_structure`.
# Bound to the name the check below (check_submodules) actually reads.
IGNORE_SUBMODULES = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]
def check_submodules():
    """Verify every discovered submodule is registered in the main transformers init.

    Raises ValueError listing any unregistered submodule.
    NOTE(review): assumes PATH_TO_TRANSFORMERS is defined earlier in this file — confirm.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS , """__init__.py""" ) , """r""" ) as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , init_content ) ) )
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = """\n""".join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registed in the main init of Transformers:\n"""
            F'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
    # Each check raises ValueError with a detailed report when an init file drifts.
    check_all_inits()
    check_submodules()
| 548 | 0 |
from statistics import mean, stdev
def normalization( data , ndigits = 3 ):
    """Min-max scale *data* into [0, 1], rounding each value to *ndigits* digits.

    Fixes the original duplicate-parameter SyntaxError and restores the public name.
    Assumes data has at least two distinct values (otherwise divides by zero).
    """
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data , ndigits = 3 ):
    """Z-score standardize *data* (zero mean, unit sample stdev), rounded to *ndigits*.

    Fixes the original duplicate-parameter SyntaxError and restores the public name.
    Requires at least two data points (stdev is undefined otherwise).
    """
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
| 290 |
def pancake_sort(arr):
    """Sort *arr* ascending with pancake sort and return the sorted list.

    Restores the local names (`cur`, `mi`, `arr`) the body reads and the public
    name used by the __main__ block; the mangled original raised NameError.
    """
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the unsorted prefix so the maximum lands at position cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it pancake-sorted.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(pancake_sort(unsorted))
| 290 | 1 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Mapping of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration stays importable.
    pass
else:
    _import_structure['''modeling_focalnet'''] = [
        '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FocalNetForImageClassification''',
        '''FocalNetForMaskedImageModeling''',
        '''FocalNetBackbone''',
        '''FocalNetModel''',
        '''FocalNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; the original dropped this assignment.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 171 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __snake_case ( PipelineTool ):
    """Image-captioning tool wrapping a BLIP vision-to-text checkpoint.

    Restores the PipelineTool base class (the original inherited an undefined
    name) and the attribute/method names the Tool machinery reads.
    """

    default_checkpoint = '''Salesforce/blip-image-captioning-base'''
    description = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    name = '''image_captioner'''
    model_class = AutoModelForVisionaSeq

    inputs = ['''image''']
    outputs = ['''text''']

    def __init__( self , *args , **kwargs):
        # The tool needs Pillow; fail early with a clear message if it is absent.
        requires_backends(self , ['''vision'''])
        super().__init__(*args , **kwargs)

    def encode( self , image: "Image"):
        # Preprocess the PIL image into model-ready tensors.
        return self.pre_processor(images=image , return_tensors='''pt''')

    def forward( self , inputs):
        return self.model.generate(**inputs)

    def decode( self , outputs):
        # Take the first (only) generated caption and strip surrounding whitespace.
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True)[0].strip()
| 171 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

# Names restored so the tokenizer class attributes below can resolve them;
# the original bound everything to a single throw-away name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file ):
    """Read a vocabulary file and return its tokens as a list, one stripped token per line.

    Renamed to the identifier the tokenizer's __init__ below actually calls.
    """
    with open(vocab_file , """r""" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowercase ( PreTrainedTokenizer ):
    """Tokenizer for ESM protein language models (one residue per whitespace-separated token).

    Restores the PreTrainedTokenizer base class, the class-attribute names the
    base class reads, instance-attribute assignments, and distinct method names;
    the mangled original was a SyntaxError (duplicate parameters) with every
    method sharing one name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        # Every vocabulary entry is treated as a no-split token.
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index , self.unk_token )

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def _tokenize(self, text, **kwargs ):
        # Residue-level tokenization: split on whitespace only.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False ):
        return len(self._id_to_token )

    def get_vocab(self ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index , self.unk_token )

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_1=None ):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_a + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_a, token_ids_1=None, already_has_special_tokens=False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        # 1 marks the special tokens added by build_inputs_with_special_tokens.
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix ):
        vocab_file = os.path.join(save_directory , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(vocab_file , """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size(self ) -> int:
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens(self, new_tokens, special_tokens=False ):
        # NOTE(review): forwarding with special_tokens=True matches the upstream ESM
        # tokenizer (added tokens are never split) — confirm against transformers.
        return super()._add_tokens(new_tokens , special_tokens=True )
| 707 |
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path ):
    """Parse a TSP edge file (`node1 node2 distance` per line) into an adjacency dict.

    Returns {node: [[neighbour, distance_str], ...]}; distances stay strings as read.
    Restores the local names and the public name used by main().
    """
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            # Edges are undirected: record the reverse direction too.
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours ):
    """Greedy nearest-neighbour tour starting at the first character of the input file.

    Returns (tour_as_node_list, total_distance). 10000 is a sentinel "no edge found"
    cost that is subtracted again in the closing-edge correction below.
    """
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node

    # Close the tour back at the start node.
    first_solution.append(end_node )

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours ):
    """Generate all 2-swap neighbours of *solution*, each with its tour cost appended.

    Returns the neighbourhood sorted ascending by cost (the last list element).
    First and last node (the depot) are never swapped.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            # The cost rides along as the candidate's last element.
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size ):
    """Run tabu search for *iters* iterations with a tabu list capped at *size* moves.

    Returns (best_solution_ever, best_cost). Restores the local names the mangled
    original read without defining.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1

        found = False
        while not found:
            # Locate the first position where the candidate differs from the current tour.
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list ) >= size:
            tabu_list.pop(0 )

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None ):
    """CLI driver: build the graph, seed a greedy tour, then run tabu search and print the result."""
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
    # Bind the parser to the name the add_argument calls below actually use;
    # the mangled original never defined `parser`.
    parser = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 48 | 0 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
# Module logger — the functions below call `logger.info`, so bind that name.
logger = logging.get_logger(__name__)
def copy_layers(src_layers, dest_layers, layers_to_copy ) -> None:
    """Copy the teacher layers at indices *layers_to_copy* from *src_layers* into *dest_layers*.

    Fixes the original triple-duplicate parameter SyntaxError and restores the
    name the callers in this file use.
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f"""{len(dest_layers )} != {len(layers_to_copy )}"""
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher ):
    """Return the hardcoded teacher layer indices for this (teacher, student) depth pair.

    Falls back to the first *n_student* layers (with a warning) when no mapping exists.
    Fixes the original duplicate-parameter SyntaxError.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                f""" {n_student}""" )
        return list(range(n_student ) )
def get_layers_to_supervise(n_student, n_teacher ) -> List[int]:
    """Return the teacher layers the student layers should be supervised against.

    Raises ValueError when the student is deeper than the teacher.
    Fixes the original duplicate-parameter SyntaxError.
    """
    if n_student > n_teacher:
        raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        # A single student layer is supervised by the teacher's last layer.
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher,
    save_path="student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Build a shallower student seq2seq model by copying selected *teacher* layers.

    Args:
        teacher: a PreTrainedModel or a checkpoint string.
        save_path: where the student (and tokenizer, if loaded) is saved.
        e / d: desired number of encoder / decoder layers (None = keep teacher's).
        copy_first_teacher_layers: copy the first N layers instead of alternating ones.
        e_layers_to_copy / d_layers_to_copy: explicit layer indices, overriding the heuristics.

    Returns:
        (student_model, encoder_layer_indices_copied, decoder_layer_indices_copied)
    """
    _msg = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path )  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher, PreTrainedModel ), f"""teacher must be a model or string got type {type(teacher )}"""
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
    except AttributeError:  # T5
        if hasattr(teacher.config, """num_encoder_layers""" ):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, """num_encoder_layers""" ):
            init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
        else:
            init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
            f""" {save_path}""" )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d )

    try:
        if hasattr(
            teacher, """prophetnet"""
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy )
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy )
    logger.info(
        f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        """teacher_type""": teacher.config.model_type,
        """copied_encoder_layers""": e_layers_to_copy,
        """copied_decoder_layers""": d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # Expose the student-creation helper as a command-line tool via python-fire.
    fire.Fire(create_student_by_copying_alternating_layers)
| 109 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """Helper that builds small RegNet configs/inputs for the unit tests below.

    Restores the self.* attribute assignments (the mangled original bound them to
    locals) and the class name the test class's setUp references.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels ):
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels ):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Unit tests for RegNet models.

    Restores the mixin base classes (the original inherited undefined names),
    distinct test-method names, and the locals/attribute writes the mangled
    bodies read without defining.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): flag names restored from the common HF test conventions — confirm upstream.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self ):
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False )

    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self ):
        return

    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes(self ):
        pass

    def test_forward_signature(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_initialization(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ),
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0 ),
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict, config, model_class )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """Load the standard COCO test image used by the integration test below."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase ):
    """Slow end-to-end check against a pretrained RegNet classification checkpoint."""

    @cached_property
    def default_image_processor(self ):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self ):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )

        expected_slice = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
| 109 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 709 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=_lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[str] = ["""note_seq"""]
def __init__( self , *__a , **__a ):
"""simple docstring"""
requires_backends(self , ['note_seq'] )
@classmethod
def _UpperCAmelCase ( cls , *__a , **__a ):
"""simple docstring"""
requires_backends(cls , ['note_seq'] )
@classmethod
def _UpperCAmelCase ( cls , *__a , **__a ):
"""simple docstring"""
requires_backends(cls , ['note_seq'] )
| 554 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    """Return the binomial coefficient C(n, r) via Pascal's-triangle DP.

    BUG FIX: the obfuscated source declared two parameters with the same
    name (a SyntaxError), collapsed every local onto one name so the row
    list `c` and index `j` were never defined, and called `min()` on a
    parameter against itself. The name and parameter names are restored
    from the `binomial_coefficient(n=10, r=5)` call below.
    """
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # C(i, 0) == 1 for every row
    for i in range(1, n + 1):
        # walk the row right-to-left so each update reads the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
| 629 |
def solution(n: int = 1000) -> int:
    """Project Euler 9: return max(a*b*c) over Pythagorean triplets with a+b+c == n.

    Returns -1 when no such triplet exists. BUG FIX: the obfuscated source
    collapsed every local onto one name, so `b`, `c`, `candidate` and
    `product` were undefined; the function name is restored from the
    `solution()` call below and the parameter name from the body's use of `n`.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 629 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) checks for KandinskyImgaImgPipeline.

    NOTE(review): restored from an obfuscated source in which the base class
    was an undefined name (`PipelineTesterMixin` is imported above), every
    attribute/method shared one name (so later definitions shadowed earlier
    ones), and digit-mangled literals appeared (`uinta` -> uint8, `3_2` -> 32).
    Method/attribute names are recovered from in-class references
    (`self.pipeline_class`, `self.dummy_unet`, `self.cross_attention_dim`, …)
    and the PipelineTesterMixin contract — confirm against upstream diffusers.
    """

    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # keeps the original (obfuscated) name because dummy_unet reads it
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Build the full set of tiny components the pipeline needs."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs (prompt, init image, embeds) for `device`."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        """End-to-end run on CPU with tiny components; checks a pixel slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for the full Kandinsky img2img pipeline.

    NOTE(review): restored from an obfuscated source where both methods
    shared one name (the test clobbered the teardown — `super().tearDown()`
    grounds the first method's name) and several call arguments were an
    undefined name; `disable=None` and the method name of the test are
    restored from upstream convention — confirm.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 419 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    """Unit tests for datasets.arrow_writer.TypedSequence.

    NOTE(review): restored from an obfuscated source in which every test
    method shared one name (only the last survived), the base class was an
    undefined name (`TestCase` is imported above), and `pa.intaa` was
    ambiguous — disambiguated here to int64 for untyped int lists and int32
    where `Value("int32")` appears; the expected exception for the forbidden
    cases is assumed to be ValueError — confirm against upstream datasets.
    """

    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        # a raw pyarrow type must not be passed alongside TypedSequence
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        # try_type falls back to inference instead of raising
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=ArrayaD((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
        args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
        self.assertIn("optimize_list_casting", kwargs)
        self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    """Read back a stream written by ArrowWriter and check chunking + content.

    Renamed from the obfuscated name to match the `_check_output(...)` call
    sites below; the original also declared both parameters with the same
    name, which is a SyntaxError.
    """
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """Write two examples and check the resulting schema and chunking.

    NOTE(review): the obfuscated source declared both parameters as
    `__magic_name__` (SyntaxError) and collapsed the two `fields` variants
    into identical dicts via `pa.intaa`; int64/int32 restored — confirm.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """Features passed to the writer must round-trip through the stream."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """A non-hashable key (a list) must be rejected.

    NOTE(review): the expected exception was obfuscated; `InvalidKeyError`
    is imported above and matches a bad key datatype — confirm upstream.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing the same key twice with check_duplicates=True must raise."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys with duplicate checking enabled write normally."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """write_batch with two rows (plus an empty batch) matches write()."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})  # empty batch is a no-op
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """write_table with a two-row pyarrow Table matches write()."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """write_row with two single-row pyarrow Tables matches write()."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """Writing to an on-disk file instead of a stream works the same way."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Return the innermost (non-list) pyarrow dtype of `arr_type`.

    Renamed to match the recursive call in its own body and the call sites
    below; the obfuscated def named the parameter `__magic_name__` while the
    body used `arr_type` (NameError).
    """
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive element of an arbitrarily nested list in place.

    Renamed to match the recursive call in its own body and the call site
    below; the obfuscated source assigned the new value to a throwaway local
    instead of `lst[0]`, making the function a no-op at the base case.
    """
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """optimized_int_type narrows the inferred integer dtype of the array."""
    # NOTE(review): `pa.intaa` was ambiguous in the obfuscated source;
    # int64 default / int32 for Value("int32") restored — confirm upstream.
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Known tokenizer columns get narrow dtypes; out-of-range values widen.

    NOTE(review): `pa.inta`/`pa.intaa` in the obfuscated source were
    disambiguated to int8 / int32 / int64 per the OptimizedTypedSequence
    column defaults — confirm against upstream datasets.
    """
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer must close its stream on both clean and exceptional exit.

    Parameter names restored from the `raise_exception` parametrize string
    and the pytest `tmp_path` fixture the body uses; the obfuscated source
    declared both parameters with the same name (SyntaxError).
    """
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """Writing through an fsspec filesystem (the `mockfs` fixture) works."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """ParquetWriter produces a stream readable with pyarrow.parquet."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """With embed_local_files, image bytes are inlined; otherwise only the path.

    Parameter names restored from the parametrize string and the pytest
    `tmp_path` fixture; the obfuscated source declared both parameters with
    the same name (SyntaxError).
    """
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """The writer drops nullable=False from inferred schemas.

    NOTE(review): the obfuscated `nullable=__magic_name__` must be False —
    with True the assertion below would be vacuous.
    """
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 419 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__magic_name__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase__(ChunkPipeline):
    """Zero-shot object-detection pipeline (PyTorch only).

    NOTE(review): restored from an obfuscated source in which three methods
    shared the name `_a`, several defs had duplicate parameter names
    (SyntaxError), and call arguments were undefined names. Method names
    follow the ChunkPipeline contract (`preprocess`/`_forward`/`postprocess`/
    `_sanitize_parameters`); `PIPELINE_INIT_ARGS`, `ChunkPipeline` and
    `MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING` are all imported above
    and otherwise unused, grounding their use here.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Detect objects described by `candidate_labels` in `image`."""
        # legacy alias for candidate_labels
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        # only postprocessing takes tunable parameters
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model input per candidate label (chunked pipeline)."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        # NOTE(review): dtype was the mangled `torch.intaa`; int32 assumed —
        # confirm against the upstream pipeline.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # pop pass-through metadata before calling the model
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Merge per-label detections, sort by score, optionally truncate."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        # highest score first (reverse=True assumed from upstream convention)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box):
        """Convert a 4-element box tensor into an xmin/ymin/xmax/ymax dict."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 102 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both assignments bind the same name, so the first value (16)
# is immediately overwritten by 32. Presumably these were two distinct batch
# size constants (e.g. MAX_GPU_BATCH_SIZE = 16, EVAL_BATCH_SIZE = 32) before
# renaming — confirm against the original script; nothing below reads them.
_UpperCamelCase = 1_6
_UpperCamelCase = 3_2
def bamb(x):
    """Convert a byte count to whole mebibytes (floor division by 2**20).

    NOTE(review): renamed from the obfuscated `SCREAMING_SNAKE_CASE` — every
    call site in this script (TorchTracemalloc, the training loop) already
    refers to it as `bamb`.
    """
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager measuring CUDA memory allocated inside its body.

    After exit, `begin`/`end`/`peak` hold raw byte counters and `used`/
    `peaked` hold MiB deltas relative to `begin`.

    NOTE(review): renamed from the obfuscated `__a` (the training loop below
    instantiates `TorchTracemalloc`), and the `self.*` attribute assignments
    are restored — the obfuscated version bound throwaway locals instead.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build train/eval dataloaders over a GLUE MRPC slice.

    Args:
        accelerator: `Accelerator` instance (used to pick TPU-safe padding).
        batch_size: per-device batch size for both loaders.
        model_name: tokenizer checkpoint to load.
        n_train / n_val: number of examples to keep from each split.

    Returns:
        (train_dataloader, eval_dataloader)

    NOTE(review): the obfuscated def reused one parameter name five times (a
    SyntaxError) and referenced `datasets`/`accelerator` that were never
    bound; names restored to match the positional call in `training_function`.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"""train[:{n_train}]""", "validation": f"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train for `config["num_epochs"]` epochs while tracking peak CUDA memory.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace (model_name_or_path, output_dir,
            peak_memory_upper_bound, n_train, n_val, num_epochs).

    NOTE(review): the obfuscated def reused one name for both parameters (a
    SyntaxError), and the per-epoch peak memory was bound to a throwaway
    local although the assert below reads `train_total_peak_memory[...]`;
    both restored. Name matches the call in `main`.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DummyOptim when DeepSpeed supplies the optimizer config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: DummyScheduler when DeepSpeed supplies the scheduler config.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    """Parse CLI args and launch `training_function`.

    NOTE(review): renamed from the obfuscated `SCREAMING_SNAKE_CASE` — the
    `__main__` guard below calls `main()`. Argument types were `lowercase__`
    (undefined); restored to str/float/int per each option's semantics.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 453 | 0 |
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Optional[int] = fast.next.next
snake_case__ : int = slow.next
snake_case__ : int = slow.next
snake_case__ : List[str] = None # Don't forget here! But forget still works!
# reverse the second part
snake_case__ : Union[str, Any] = None
while second:
snake_case__ : Any = second.next
snake_case__ : Tuple = node
snake_case__ : Union[str, Any] = second
snake_case__ : Optional[int] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : int = node.next
snake_case__ : Any = head.next
return True
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ : List[Any] = head
while fast and fast.next:
snake_case__ , snake_case__ : Dict = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : List[str] = slow.next
stack.append(slow.val)
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : Dict = cur.next
return True
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Optional[int] = 0
while head:
if head.val in d:
d[head.val].append(UpperCAmelCase_)
else:
snake_case__ : Dict = [pos]
snake_case__ : str = head.next
pos += 1
snake_case__ : Optional[int] = pos - 1
snake_case__ : List[Any] = 0
for v in d.values():
if len(UpperCAmelCase_) % 2 != 0:
middle += 1
else:
snake_case__ : str = 0
for i in range(0 , len(UpperCAmelCase_)):
if v[i] + v[len(UpperCAmelCase_) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 127 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import table handed to `_LazyModule` at the bottom of the file.
# NOTE(review): the obfuscated version bound this dict (and the torch-only
# model list) to throwaway `lowercase_` names, leaving `_import_structure`
# undefined where `_LazyModule` reads it; restored.
_import_structure = {
    'configuration_trajectory_transformer': [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TrajectoryTransformerConfig',
    ],
}

# The modeling module needs torch; register it only when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 127 | 1 |
from ...utils import deprecate
# Re-export from the new location; the import alone is the point of this module.
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401

# Emit a deprecation notice for the old import path. standard_warn=False and
# stacklevel=3 are forwarded to `deprecate`; presumably they suppress the
# boilerplate suffix and attribute the warning to the caller's import site —
# confirm against diffusers' `deprecate` utility.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 377 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase__ :
    '''CLI/config arguments for training a CodeParrot-style model.

    NOTE(review): every field below is bound to the same obfuscated name
    `_UpperCAmelCase` (so only the last assignment survives), the assignments
    lack the type annotations dataclasses need to treat them as fields, and
    `A` used as a default is not defined in this module — presumably these
    were distinct, annotated fields with True/False/None defaults before
    obfuscation; confirm against the original codeparrot `arguments.py`.
    '''

    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot''', metadata={'''help''': '''Model name or path of model to be trained.'''} )
    _UpperCAmelCase = field(
        default='''./''', metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot-clean-train''', metadata={'''help''': '''Name or path of training dataset.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot-clean-valid''', metadata={'''help''': '''Name or path of validation dataset.'''} )
    _UpperCAmelCase = field(default=2, metadata={'''help''': '''Batch size for training.'''} )
    _UpperCAmelCase = field(default=2, metadata={'''help''': '''Batch size for evaluation.'''} )
    _UpperCAmelCase = field(default=0.1, metadata={'''help''': '''Value of weight decay.'''} )
    _UpperCAmelCase = field(
        default=1_00_00, metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    _UpperCAmelCase = field(default=2E-4, metadata={'''help''': '''Learning rate fo training.'''} )
    _UpperCAmelCase = field(default='''cosine''', metadata={'''help''': '''Learning rate.'''} )
    _UpperCAmelCase = field(
        default=7_50, metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
    _UpperCAmelCase = field(
        default=16, metadata={'''help''': '''Number of gradient accumulation steps.'''} )
    _UpperCAmelCase = field(
        default=A, metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
    _UpperCAmelCase = field(default=5_00_00, metadata={'''help''': '''Maximum number of training steps.'''} )
    _UpperCAmelCase = field(
        default=-1, metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
    _UpperCAmelCase = field(default=10_24, metadata={'''help''': '''Sequence lengths used for training.'''} )
    _UpperCAmelCase = field(default=1, metadata={'''help''': '''Training seed.'''} )
    _UpperCAmelCase = field(
        default=10_24, metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''}, )
    _UpperCAmelCase = field(
        default=A, metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
    _UpperCAmelCase = field(default=A, metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class lowercase__ :
    '''CLI/config arguments for perplexity evaluation of a trained model.

    NOTE(review): all fields share the obfuscated name `_UpperCAmelCase` and
    lack type annotations, so only the last assignment is effective — see the
    original codeparrot `arguments.py` for the intended field names/types.
    '''

    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot''', metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot-clean-valid''', metadata={'''help''': '''Name or path of validation dataset.'''} )
    _UpperCAmelCase = field(default=2, metadata={'''help''': '''Batch size used for evaluation.'''} )
    _UpperCAmelCase = field(
        default=-1, metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
    _UpperCAmelCase = field(default=10_24, metadata={'''help''': '''Length of sequences to be evaluated.'''} )
    _UpperCAmelCase = field(default=1, metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class lowercase__ :
    '''CLI/config arguments for HumanEval-style code-generation evaluation.

    NOTE(review): fields share one obfuscated name and `A` defaults are
    undefined here (presumably None/True placeholders); the second
    "Random seed" help string looks copy-pasted for what is evidently an
    output-file option — confirm against the original codeparrot arguments.
    '''

    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot''', metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
    _UpperCAmelCase = field(default=A, metadata={'''help''': '''Number of workers used for code evaluation.'''} )
    _UpperCAmelCase = field(
        default=A, metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''}, )
    _UpperCAmelCase = field(
        default=A, metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
    _UpperCAmelCase = field(default=0.2, metadata={'''help''': '''Sampling temperature used for generation.'''} )
    _UpperCAmelCase = field(default=2_56, metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
    _UpperCAmelCase = field(default=0, metadata={'''help''': '''Top-k parameter used for generation.'''} )
    _UpperCAmelCase = field(default=0.95, metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
    _UpperCAmelCase = field(default=10, metadata={'''help''': '''Number of generations to run in parallel.'''} )
    _UpperCAmelCase = field(
        default=2_00, metadata={'''help''': '''Number of completions to generate for each sample.'''} )
    _UpperCAmelCase = field(default=1, metadata={'''help''': '''Random seed used for evaluation.'''} )
    _UpperCAmelCase = field(
        default='''eval_results.json''', metadata={'''help''': '''Random seed used for evaluation.'''} )
    _UpperCAmelCase = field(
        default='''0''', metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
    _UpperCAmelCase = field(
        default=-1, metadata={
            '''help''': (
                '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
                ''' number corresponds to which GPU device id to run on.'''
            )
        }, )
@dataclass
class lowercase__ :
    '''CLI/config arguments for dataset cleaning / filtering / deduplication.

    NOTE(review): fields share the obfuscated name `_UpperCAmelCase` (only the
    last binding survives) and `A` is undefined in this module — confirm the
    intended names and defaults against the original codeparrot arguments.
    '''

    _UpperCAmelCase = field(
        default=A, metadata={
            '''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
        }, )
    _UpperCAmelCase = field(
        default='''transformersbook/codeparrot''', metadata={'''help''': '''Folder or name of dataset to process.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot-clean''', metadata={'''help''': '''Folder to save processed processed dataset.'''} )
    _UpperCAmelCase = field(
        default=10_00_00, metadata={'''help''': '''Number of files to save per JSON output file.'''} )
    _UpperCAmelCase = field(default='''content''', metadata={'''help''': '''Column containing text data to process.'''} )
    _UpperCAmelCase = field(
        default=10_00, metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
    _UpperCAmelCase = field(
        default=1_00, metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
    _UpperCAmelCase = field(
        default=0.25, metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
    _UpperCAmelCase = field(
        default=1.5, metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
    _UpperCAmelCase = field(
        default=0.7, metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot''', metadata={'''help''': '''Name or path to the tokenizer.'''}, )
    _UpperCAmelCase = field(
        default=A, metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
    _UpperCAmelCase = field(
        default=0.85, metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class lowercase__ :
    '''CLI/config arguments for training a new tokenizer.

    NOTE(review): fields share one obfuscated name and lack annotations, and
    `A` is undefined; the two "Number of examples..." help strings suggest one
    field was vocab size before obfuscation — confirm against the original.
    '''

    _UpperCAmelCase = field(
        default='''gpt2''', metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
    _UpperCAmelCase = field(
        default='''transformersbook/codeparrot-train''', metadata={'''help''': '''Dataset to train tokenizer on.'''} )
    _UpperCAmelCase = field(default='''content''', metadata={'''help''': '''Column containing text data to process.'''} )
    _UpperCAmelCase = field(default=20_00_00, metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
    _UpperCAmelCase = field(
        default=3_27_68, metadata={'''help''': '''Number of examples to train the tokenizer on.'''} )
    _UpperCAmelCase = field(default='''codeparrot''', metadata={'''help''': '''Name of new tokenizer.'''} )
    _UpperCAmelCase = field(default=A, metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class lowercase__ :
    '''CLI/config arguments for pre-tokenizing a dataset.

    NOTE(review): fields share the obfuscated name `_UpperCAmelCase` and `A`
    is undefined in this module — confirm names/defaults against the original.
    '''

    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot''', metadata={'''help''': '''Name or path to the tokenizer.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot-clean-train''', metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
    _UpperCAmelCase = field(
        default='''tokenized-codeparrot-train''', metadata={'''help''': '''Repo name of the pretokenized data.'''} )
    _UpperCAmelCase = field(default=A, metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class lowercase__ :
    '''CLI/config arguments for initializing a fresh model from a config.

    NOTE(review): fields share the obfuscated name `_UpperCAmelCase` and `A`
    is undefined in this module — confirm names/defaults against the original.
    '''

    _UpperCAmelCase = field(
        default='''gpt2-large''', metadata={'''help''': '''Configuration to use for model initialization.'''} )
    _UpperCAmelCase = field(
        default='''codeparrot/codeparrot''', metadata={'''help''': '''Tokenizer attached to model.'''} )
    _UpperCAmelCase = field(default='''codeparrot''', metadata={'''help''': '''Name of the created model.'''} )
    _UpperCAmelCase = field(default=A, metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 573 | 0 |
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
#
# NOTE(review): this list must be named `pkgs_to_check_at_runtime` — the
# appends and the loop below read that name; the obfuscated `lowerCAmelCase__`
# binding left it undefined.
pkgs_to_check_at_runtime = """python tqdm regex requests packaging filelock numpy tokenizers""".split()

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("""importlib_metadata""")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def lowerCamelCase_(pkg, hint=None):
    """Check that the installed version of `pkg` satisfies its pin in `deps`.

    Args:
        pkg: key into the `deps` table.
        hint: optional extra message forwarded to `require_version` for the
            error raised on a mismatch.

    NOTE(review): the obfuscated def reused `UpperCAmelCase_` for both
    parameters, which is a SyntaxError; names restored.
    """
    require_version(deps[pkg], hint)
| 648 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( PipelineTool ):
    """Zero-shot English text classification tool (facebook/bart-large-mnli).

    NOTE(review): the obfuscated version bound every class attribute to `a__`
    and every method to `A__`, so only the last of each survived; `encode`
    also dropped the `self._labels` assignment that `decode` reads, and the
    config attribute was garbled to `idalabel`. Names are restored to the
    PipelineTool contract and the HF config API (`id2label`). Base class
    restored to `PipelineTool`, which is what this module imports.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Resolve the model's entailment label id once after loading."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        """Tokenize one (text, "This example is <label>") pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'''This example is {label}''' for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        """Return the candidate label whose entailment logit is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 648 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Shard layout for the distributed-split check below.
# NOTE(review): both constants were bound to the same obfuscated name
# `_lowercase` (the second overwrote the first, and the class definition
# below overwrote both) while `main` reads NUM_SHARDS / NUM_ITEMS_PER_SHARD;
# names restored.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class _lowercase ( __a ):
pass
def __UpperCamelCase(shards):
    """Yield NUM_ITEMS_PER_SHARD items per shard name.

    The parameter MUST be called `shards`: `main` passes it by keyword via
    ``gen_kwargs={"shards": [...]}``. The obfuscated version named it `a` and
    iterated an undefined `shards`, and used the argument as the item count.
    """
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    """Verify `split_dataset_by_node` yields the expected per-rank item count.

    Reads RANK/WORLD_SIZE from the environment, builds the sharded dataset
    (streaming or materialized), splits it for this node, and raises
    `FailedTestError` if the local size is wrong.

    NOTE(review): renamed from `__UpperCamelCase` (which also shadowed the
    generator above) — the `__main__` guard calls `main()`. Argument types
    were the undefined name `a`; restored to bool/int per each option.
    """
    rank = int(os.environ['''RANK'''])
    world_size = int(os.environ['''WORLD_SIZE'''])

    parser = ArgumentParser()
    parser.add_argument('''--streaming''', type=bool)
    parser.add_argument('''--local_rank''', type=int)
    parser.add_argument('''--num_workers''', type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(__UpperCamelCase, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""")


if __name__ == "__main__":
    main()
| 342 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase(SchedulerCommonTest):
    """Test suite for `DDPMScheduler`.

    NOTE(review): the obfuscated version named every method `UpperCamelCase`
    (so only the last survived and pytest collected nothing) and bound every
    local to `snake_case` while the bodies read the real names (`config`,
    `scheduler`, ...). Method/attribute names are restored to the
    `SchedulerCommonTest` contract (`scheduler_classes`,
    `get_scheduler_config`) and pytest's `test_*` convention; base class
    restored to the imported `SchedulerCommonTest`.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            residual = model(sample, t)
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 342 | 1 |
"""simple docstring"""
def UpperCamelCase_(word):
    """Return *word* with every ASCII lowercase letter ('a'-'z') replaced by
    its uppercase counterpart; all other characters pass through unchanged.
    """
    pieces = []
    for ch in word:
        if "a" <= ch <= "z":
            # Shift into the uppercase ASCII range.
            pieces.append(chr(ord(ch) - 32))
        else:
            pieces.append(ch)
    return "".join(pieces)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 406 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to load.
        config_file: JSON config describing the T5 architecture.
        pytorch_dump_path: directory to save the converted PyTorch model to.

    NOTE(review): the obfuscated def reused one name for all three parameters
    (a SyntaxError) and the `__main__` block calls
    `convert_tf_checkpoint_to_pytorch`; both restored.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)


# Backward-compatible alias for the old (obfuscated) name.
UpperCamelCase_ = convert_tf_checkpoint_to_pytorch
if __name__ == "__main__":
    # NOTE(review): the obfuscated guard bound the parser and the parsed args
    # to `snake_case` while reading `parser`/`args`; names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 406 | 1 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort *arr* with strand sort and return the sorted list.

    Repeatedly extracts an ordered "strand" (sublist) from *arr* and merges it
    into *solution*, recursing until *arr* is empty.  NOTE: *arr* is consumed
    (emptied) in place.  Fixes the original definition, whose three parameters
    all shared one name (a SyntaxError) and whose body/recursion referenced
    the un-obfuscated names ``arr``/``strand_sort``.

    Args:
        arr: list to sort; mutated (emptied) by the call.
        reverse: sort descending when True.
        solution: accumulator used by the recursion; callers normally omit it.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    # Pull out one ordered strand.  Popping while enumerating skips the element
    # after each pop, but skipped elements stay in `arr` and are handled by a
    # later recursive pass, so the result is still fully sorted.
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    # `solution` is mutated in place, so the recursive call returns it too.
    return strand_sort(arr, reverse, solution)


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 107 | '''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode must make plain requests hang-guard and honour timeouts.

    The original raised on an undefined name ``__snake_case``; the intended
    exception is ``RequestWouldHangIndefinitelyError`` (imported above).  Also
    renamed so pytest collects it and so it no longer shadows sibling tests.
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode must make any HTTP request raise ConnectionError.

    Renamed from a duplicated obfuscated name so pytest collects it and it no
    longer shadows / is shadowed by the sibling offline tests.
    """
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode():
    """HF_DATASETS_OFFLINE=1 must make http_head raise ConnectionError.

    The original raised on an undefined name ``__snake_case``; the builtin
    ``ConnectionError`` is the exception http_head raises in offline mode.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 107 | 1 |
"""simple docstring"""
import pytest
# Name and source of the dummy dataset loading script used by the fixtures
# below.  Both constants were previously bound to the same name `lowercase_`,
# while the fixtures referenced these (then-undefined) identifiers.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets

REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    """Module/directory name of the dummy dataset loading script.

    Renamed from a duplicated obfuscated name so dependent fixtures can
    request it by this name.
    """
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    """Python source code of the dummy dataset loading script.

    Renamed from a duplicated obfuscated name so dependent fixtures can
    request it by this name.
    """
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Write the dummy dataset script to ``tmp_path/datasets/<name>/<name>.py``.

    Returns the script directory as a string.  The original definition gave
    all three parameters the same name (a SyntaxError) and referenced fixture
    names that were never defined; pytest fixture injection requires the
    parameter names used here.
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'''{script_name}.py'''
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 295 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files", [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ], )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    """from_directory must read dataset_size from README.md and/or dataset_infos.json.

    The original definition duplicated one parameter name (a SyntaxError) and
    referenced the undefined names ``files``/``tmp_path_factory`` in its body.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info", [
        DatasetInfo(),
        DatasetInfo(
            description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ),
    ], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    """A DatasetInfo written to a directory must reload equal and leave dataset_info.json.

    The original definition duplicated one parameter name (a SyntaxError).
    """
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict must keep exactly the YAML-whitelisted keys and round-trip via yaml.

    Renamed from a quintuplicated obfuscated name so pytest collects it.
    """
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo must serialize to an empty YAML dict.

    Renamed from a quintuplicated obfuscated name so pytest collects it.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict", [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, )
            }),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }),
    ], )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """A DatasetInfosDict written to a directory must reload (YAML-recoverable part) equal.

    The original definition duplicated one parameter name (a SyntaxError) and
    dropped the attribute/key assignments inside the loop.
    """
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
| 295 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
# Module logger (conventional name; the previous binding was never referenced).
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCAmelCase(ChunkPipeline):
    """Zero-shot object detection pipeline: detect objects described by free-text labels.

    Fixes from the obfuscated version: the decorator argument and base class
    were the undefined name ``__A`` (now the imported PIPELINE_INIT_ARGS /
    ChunkPipeline); ``__call__`` and ``postprocess`` repeated one parameter
    name (a SyntaxError); the four pipeline-protocol methods all shared one
    name, so ChunkPipeline could never dispatch to them; ``torch.intaa`` does
    not exist; and the score-sort lambda referenced undefined names.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Run detection on one image (or a batch dict) against text queries."""
        # Legacy alias for candidate_labels.
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        # Only postprocessing accepts user parameters.
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model input per candidate label (chunk pipeline protocol)."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        # NOTE(review): original had the nonexistent `torch.intaa`; int32 is
        # assumed for pixel dimensions — confirm against the image processor.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # Pop bookkeeping keys so only tensors reach the model.
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Merge per-label detections, filter by score, sort, and truncate to top_k."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            outputs = BaseModelOutput(model_output)
            detections = self.image_processor.post_process_object_detection(
                outputs=outputs, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in detections["scores"].nonzero():
                score = detections["scores"][index].item()
                box = self._get_bounding_box(detections["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box):
        """Convert a 4-element box tensor to an int dict (xmin/ymin/xmax/ymax)."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        return {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
| 99 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config map (both were previously bound to the
# same throwaway name, the second assignment shadowing the first).
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class _a(PretrainedConfig):
    """Configuration for an MRA model.

    Fixes from the obfuscated version: the base class was the undefined name
    ``__a`` (now the imported PretrainedConfig); every ``__init__`` parameter
    shared the name ``lowercase_`` (a SyntaxError); and the required
    ``model_type`` class attribute was missing.
    """

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; defaults match the original positional defaults."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 451 | 0 |
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each character to its 1-based alphabet position (a -> 1, ..., z -> 26).

    Renamed from a triplicated obfuscated name: ``main`` below calls ``encode``.
    The original body also iterated/ord'ed undefined names.
    """
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    """Inverse of ``encode``: map 1-based alphabet positions back to letters.

    Renamed from a triplicated obfuscated name: ``main`` below calls ``decode``.
    """
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    """Interactively encode user input, then show the round-trip decode.

    Renamed from a triplicated obfuscated name: the ``__main__`` guard below
    calls ``main()``.
    """
    encoded = encode(input("""-> """).strip().lower())
    print("""Encoded: """, encoded)
    print("""Decoded:""", decode(encoded))
if __name__ == "__main__":
main() | 710 | '''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds a tiny random AlbertConfig and matching dummy inputs for the Flax tests.

    Renamed from ``a``: the model test class below instantiates
    ``FlaxAlbertModelTester(self)``.  Also restores distinct method names (both
    helpers shared one name, the second shadowing the first) and distinct
    ``__init__`` parameter names (all shared one name — a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) dummy tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        # is_decoder was previously an undefined name; an encoder-only tester
        # uses False here.
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the (config, inputs_dict) pair the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for the Albert heads.

    Fixes from the obfuscated version: the mixin base was the undefined name
    ``__lowerCAmelCase`` (now the imported FlaxModelTesterMixin); the class
    attribute must be named ``all_model_classes`` for the mixin;
    FlaxAlbertForQuestionAnswering was listed twice; ``setUp`` had a
    non-unittest name and bound the tester to a local instead of
    ``self.model_tester``.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Every head must load from the albert-base-v2 hub checkpoint and run."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
self.assertIsNotNone(snake_case_ )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against known albert-base-v2 hidden states.

    Renamed from a duplicated class name ``a``; the test method also had a
    non-``test_`` name, so unittest never ran it.
    """

    @slow
    def test_inference_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference values captured from a known-good run of the checkpoint.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module logger (the previous binding was immediately shadowed by the
# constant assignments below and never usable).
logger = logging.get_logger(__name__)
# Tokenizer resource tables.  All four dicts were previously assigned to the
# same name, each shadowing the last, while the tokenizer class below
# references these identifiers.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class __lowercase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RetriBERT tokenizer; behaves like BertTokenizerFast.

    Fixes from the obfuscated version: the base class was the undefined name
    ``__lowerCamelCase`` (now the imported PreTrainedTokenizerFast); the class
    attributes and method names must match the PreTrainedTokenizerFast API;
    ``__init__`` and the two sequence helpers repeated parameter names
    (SyntaxErrors).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the
        # options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """[CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """Token type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the backend tokenizer's vocabulary files; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 596 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny 3-row Dataset where the first two rows are near-duplicates.

    Renamed from an obfuscated name: the test class below calls ``get_dataset``.
    The original also passed an undefined name to ``Dataset.from_dict``.
    """
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class __lowercase(TestCase):
    """Tests for minhash_deduplication on a tiny dataset with one duplicate pair.

    Fixes from the obfuscated version: the base class was the undefined name
    ``__lowerCamelCase`` (now the imported TestCase); both test methods shared
    one non-``test_`` name (the second shadowed the first and neither ran);
    the final assertion compared against an undefined name instead of True.
    """

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['copies'], 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'], True)
| 596 | 1 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Frequency of each possible total when rolling dice_number fair dice.

    Index t of the returned list holds how many of the sides_number**dice_number
    equally likely outcomes sum to t (indices below dice_number stay 0).
    Fixes the original definition, whose two parameters shared one name
    (a SyntaxError) and whose body referenced undefined identifiers.
    """
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every ordered outcome and tally its sum.
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """Project Euler 205: probability Peter (nine 4-sided dice) beats Colin (six 6-sided).

    Returns the probability rounded to 7 decimal places.  Renamed from an
    obfuscated name (the ``__main__`` guard calls ``solution()``); the loop
    lower bound was also an undefined identifier.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    # For each Peter total, count Colin outcomes strictly below it.
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 642 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 
'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 
'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase__(datasets.Metric):
    """Corpus-level Google BLEU (GLEU) metric wrapping nltk's ``gleu_score``.

    Fixes from the obfuscated version: both methods shared one name (breaking
    the ``datasets.Metric`` protocol, which dispatches to ``_info`` and
    ``_compute``), and ``_compute``'s parameters all shared one name
    (a SyntaxError).
    """

    def _info(self) -> MetricInfo:
        """Describe the metric and the feature schema of its inputs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                }),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        """Compute corpus GLEU over tokenized predictions and reference lists."""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 642 | 1 |
def __lowerCAmelCase(input_a: int, input_b: int) -> int:
    """Return the output of a NAND gate for two binary inputs.

    NAND is 0 only when both inputs are 1; any 0 among the inputs yields 1.
    """
    return int((input_a, input_b).count(0) != 0)


# Public alias: the truth-table helper and the __main__ block call `nand_gate`.
nand_gate = __lowerCAmelCase
def __lowerCAmelCase() -> None:
    """Check nand_gate against the complete two-input truth table."""
    truth_table = ((0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 0))
    for left, right, expected in truth_table:
        assert nand_gate(left, right) == expected
if __name__ == "__main__":
    # Print the NAND truth table, one result per line, in input order
    # (0,0), (0,1), (1,0), (1,1) — the same four prints as before.
    for first_input in (0, 1):
        for second_input in (0, 1):
            print(nand_gate(first_input, second_input))
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A( a , unittest.TestCase ):
    # NOTE(review): identifiers in this class look machine-mangled — results are
    # bound to `__a` but read back as `pipe`, `inputs`, `generator`, `image`,
    # `output`, `image_slice`, `expected_slice`, `text_inputs`, `prompt_embeds`
    # and `embeds`; the class attribute is `snake_case_` while methods read
    # `self.hub_checkpoint`, and helpers are invoked as `self.get_dummy_inputs()`.
    # The base class `a` is also undefined here (presumably
    # OnnxPipelineTesterMixin). Restore names against the upstream test file.
    """CPU smoke tests for OnnxStableDiffusionPipeline using a tiny hub checkpoint."""
    # Hub id of the tiny ONNX Stable Diffusion pipeline every test loads.
    snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> str:
        """Build common deterministic call kwargs: seeded RNG, 2 steps, numpy output."""
        __a = np.random.RandomState(_snake_case )
        __a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
        """Default scheduler: pin the output shape and a 3x3 corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = pipe(**_snake_case ).images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __a = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        """PNDM scheduler (skip_prk_steps): pin shape and corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        __a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = pipe(**_snake_case ).images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __a = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        """LMSDiscrete scheduler: pin shape and corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        __a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = pipe(**_snake_case ).images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __a = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
        """EulerDiscrete scheduler: pin shape and corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        __a = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = pipe(**_snake_case ).images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __a = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        """EulerAncestralDiscrete scheduler: pin shape and corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        __a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = pipe(**_snake_case ).images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __a = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        """DPMSolverMultistep scheduler: pin shape and corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        __a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = pipe(**_snake_case ).images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __a = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
        """Passing prompt_embeds must reproduce the plain text-prompt output."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = 3 * [inputs['''prompt''']]
        # forward
        __a = pipe(**_snake_case )
        __a = output.images[0, -3:, -3:, -1]
        __a = self.get_dummy_inputs()
        __a = 3 * [inputs.pop('''prompt''' )]
        __a = pipe.tokenizer(
            _snake_case , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors='''np''' , )
        __a = text_inputs['''input_ids''']
        __a = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
        __a = prompt_embeds
        # forward
        __a = pipe(**_snake_case )
        __a = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        """Negative-prompt embeddings must reproduce the negative_prompt string path."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = self.get_dummy_inputs()
        __a = 3 * ['''this is a negative prompt''']
        __a = negative_prompt
        __a = 3 * [inputs['''prompt''']]
        # forward
        __a = pipe(**_snake_case )
        __a = output.images[0, -3:, -3:, -1]
        __a = self.get_dummy_inputs()
        __a = 3 * [inputs.pop('''prompt''' )]
        __a = []
        for p in [prompt, negative_prompt]:
            __a = pipe.tokenizer(
                _snake_case , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors='''np''' , )
            __a = text_inputs['''input_ids''']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
        __a , __a = embeds
        # forward
        __a = pipe(**_snake_case )
        __a = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __A( unittest.TestCase ):
    # NOTE(review): as in the CPU test class above, local names look mangled —
    # values bound to `__a` are read back as `sd_pipe`, `options`, `output`,
    # `image`, `image_slice`, `expected_slice`, `prompt`, `generator`, `pipe`,
    # `scheduler`, `tmpdirname`. Restore names against the upstream test file.
    """Nightly GPU integration tests for OnnxStableDiffusionPipeline (CUDA EP)."""
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        """ONNX Runtime CUDA execution-provider tuple with a capped memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """ONNX Runtime session options used by every test in this class."""
        __a = ort.SessionOptions()
        # NOTE(review): `False` is bound to `__a`, not to a SessionOptions flag
        # (upstream sets `options.enable_mem_pattern = False`) — confirm.
        __a = False
        return options
    def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
        """Full v1-4 pipeline on CUDA: pin output shape and a 3x3 corner slice."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=_snake_case )
        __a = '''A painting of a squirrel eating a burger'''
        np.random.seed(0 )
        __a = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
        __a = output.images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        __a = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        """v1-5 with a DDIM scheduler: pin output shape and corner slice."""
        __a = DDIMScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        __a = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=_snake_case )
        __a = '''open neural network exchange'''
        __a = np.random.RandomState(0 )
        __a = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' )
        __a = output.images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        __a = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        """v1-5 with an LMSDiscrete scheduler: pin output shape and corner slice."""
        __a = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        __a = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=_snake_case )
        __a = '''open neural network exchange'''
        __a = np.random.RandomState(0 )
        __a = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' )
        __a = output.images
        __a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        __a = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
        """The step callback must fire once per step and see sane intermediate latents."""
        __a = 0
        def test_callback_fn(_snake_case , _snake_case , _snake_case ) -> None:
            # NOTE(review): parameters are mangled; the body reads `step` and
            # `latents`, and `has_been_called` is set via `__a` — confirm upstream.
            __a = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                __a = latents[0, -3:, -3:, -1]
                __a = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                __a = latents[0, -3:, -3:, -1]
                __a = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
        __a = False
        __a = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=_snake_case )
        __a = '''Andromeda galaxy in a bottle'''
        __a = np.random.RandomState(0 )
        pipe(
            prompt=_snake_case , num_inference_steps=5 , guidance_scale=7.5 , generator=_snake_case , callback=_snake_case , callback_steps=1 , )
        # 5 inference steps plus the initial callback invocation.
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
        """With safety checker disabled the pipeline still generates and survives save/load."""
        __a = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(_snake_case , _snake_case )
        assert pipe.safety_checker is None
        __a = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(_snake_case )
            __a = OnnxStableDiffusionPipeline.from_pretrained(_snake_case )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        __a = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
        assert image is not None
import numpy as np
def _SCREAMING_SNAKE_CASE(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x)).

    Args:
        vector: any numpy array (broadcasting applies element-wise).

    Returns:
        Array of the same shape with values in (0, 1).
    """
    return 1 / (1 + np.exp(-vector))


# Public alias: the swish helper below (and doctest users) call `sigmoid`.
sigmoid = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU/swish activation: x * sigmoid(x).

    Args:
        vector: any numpy array.

    Returns:
        Array of the same shape, x * 1/(1 + exp(-x)).
    """
    # Inlined sigmoid keeps this helper self-contained:
    # x * (1 / (1 + e^-x)) == x / (1 + e^-x).
    return vector / (1 + np.exp(-vector))


# Public alias with the activation's conventional name.
swish = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 651 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
# Module-level logger for this metric wrapper.
# NOTE(review): annotated `: int` but it holds a Logger, and the mangled name
# `lowerCamelCase` is reused by the constant strings below — confirm upstream.
lowerCamelCase : int = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _SCREAMING_SNAKE_CASE(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    """Extract the per-document coreference structures CoVal's evaluator needs.

    Wraps the single document's key (gold) and system lines, reads mention
    clusters from both, optionally restricts mentions (NP_only / min_span),
    optionally removes nested mentions, and returns a mapping
    ``{doc: (key_clusters, sys_clusters, key_mention_sys_cluster,
    sys_mention_key_cluster)}``.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # Parse trees come from the key (gold) lines on purpose, mirroring the
        # upstream CoVal wrapper.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


# Alias: the `evaluate` helper below calls this function by its upstream name.
get_coref_infos = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score one document with every (name, metric) pair and collect the results.

    Returns a dict with ``<name>/recall``, ``<name>/precision`` and ``<name>/f1``
    per metric, plus ``conll_score`` (the average of the MUC, B-cubed and CEAFe
    F1 values, scaled to 0-100) once all three have been computed.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        # Only these three metrics contribute to the averaged CoNLL score.
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


# Alias: the Metric class below calls this helper by its upstream name.
evaluate = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(key_lines):
    """Return True if the first data line of ``key_lines`` carries a gold parse.

    ``#`` comment lines are skipped. The first non-comment line with more than
    six whitespace-separated columns is inspected: column 5 (the parse bit) is
    a gold parse whenever it is not ``"-"``. Scanning stops at the first line
    that is too short.
    """
    for line in key_lines:
        if line.startswith("#"):
            continue
        columns = line.split()  # split once instead of twice per line
        if len(columns) <= 6:
            break
        if columns[5] != "-":
            return True
    return False


# Alias mirroring the upstream helper name, for callers and tests.
check_gold_parse_annotation = _SCREAMING_SNAKE_CASE
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
    """CoVal wrapper computing MUC, B-cubed, CEAFe, LEA and the CoNLL average."""

    def _info(self):
        """Declare metric metadata and the CoNLL-line feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # One CoNLL-formatted line per word, per document.
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Score ``predictions`` against ``references`` (both in CoNLL line format).

        Raises:
            NotImplementedError: if ``min_span`` is requested but the references
                carry no gold parse annotation.
        """
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 651 | 1 |
def __lowercase(bit_count: int) -> list:
    """Return the ``bit_count``-bit Gray code sequence as a list of integers.

    Args:
        bit_count: number of bits; must be non-negative.

    Raises:
        ValueError: if ``bit_count`` is negative.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # Generate the bit strings, then convert each entry from base 2.
    return [int(code, 2) for code in gray_code_sequence_string(bit_count)]
def __lowercase(bit_count: int) -> list:
    """Return the ``bit_count``-bit Gray code sequence as binary strings.

    Built by the classic reflect-and-prefix construction: prefix the
    (n-1)-bit sequence with '0', then its reverse with '1', so consecutive
    codes differ in exactly one bit.
    """
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # a full n-bit Gray code has 2**n entries
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []
    # First half: the smaller sequence prefixed with '0', in order.
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # Second half: the smaller sequence prefixed with '1', reversed.
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


# Alias used by the recursive call above and by the integer-sequence helper.
gray_code_sequence_string = __lowercase
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for this conversion script (verbosity set to info above).
UpperCAmelCase = logging.get_logger()
@dataclass
class __magic_name__ :
    """Record the leaf operations a module executes during one forward pass.

    A forward hook is registered on every submodule; the hook keeps modules
    without children (plus Conv2d/BatchNorm2d explicitly), and ``parametrized``
    then narrows the trace down to operations carrying learnable state.
    """

    module: nn.Module
    # Modules seen during the traced forward pass, in execution order.
    traced: List[nn.Module] = field(default_factory=list)
    # Hook handles, detached again at the end of __call__.
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module is a "leaf" when it reports no submodules besides itself;
        # Conv2d / BatchNorm2d are accepted explicitly as traceable ops.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        """Run one traced forward pass of ``self.module`` on ``x``; return self."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Remove every hook so later forward passes are not traced.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        """Traced operations that own learnable parameters (non-empty state_dict)."""
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))


# Alias: the conversion code below instantiates this tracer as `Tracker`.
Tracker = __magic_name__
@dataclass
class __magic_name__ :
    """Copy parameters between two functionally equivalent modules.

    Traces ``src`` and ``dest`` with the same input, pairs the traced
    parametrized operations in order, and loads each source op's weights
    into the matching destination op.
    """

    src: nn.Module
    dest: nn.Module
    # When 1, print every transferred (src, dest) pair.
    verbose: int = 0
    # Operation types to ignore on either side when pairing.
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer weights from ``self.src`` into ``self.dest`` using input ``x``.

        Raises:
            Exception: when the two traces contain different operation counts.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


# Alias: the conversion helpers below construct this class as `ModuleTransfer`.
ModuleTransfer = __magic_name__
def lowerCamelCase(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Port one timm ResNet checkpoint into a HF ResNetForImageClassification.

    Loads the pretrained timm model, copies its weights into a freshly built
    HF model via ModuleTransfer, sanity-checks that both produce the same
    logits, and optionally pushes the model and an image processor to the hub.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    # The converted model must reproduce the original logits.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


# Alias restoring the name the batch converter and __main__ block use.
convert_weight_and_push = lowerCamelCase
def lowerCamelCase(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet — or all supported sizes when ``model_name`` is None.

    Downloads the ImageNet-1k label mapping, builds a classification config per
    architecture, delegates to the single-checkpoint converter, and returns
    ``(config, expected_shape)`` for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    # Label mapping shared by every converted config.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory pre-filled with the ImageNet classification head metadata.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        # Bind `config` here too so the return below never sees it unbound.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


# Alias restoring the name the __main__ block calls.
convert_weights_and_push = lowerCamelCase
if __name__ == "__main__":
    # NOTE(review): names here look machine-mangled — every result is bound to
    # `UpperCAmelCase` but read back as `parser`, `args` and
    # `pytorch_dump_folder_path`, and `convert_weights_and_push` is not defined
    # under that name in this file. Restore against the upstream script.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    UpperCAmelCase = parser.parse_args()
    UpperCAmelCase = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 677 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)  # module logger
# Map of pretrained Open-Llama checkpoints to their remote config files.
lowercase__ = {
    '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class UpperCAmelCase_ (PretrainedConfig):
    """Configuration class for Open-Llama models.

    Stores the architecture hyper-parameters and forwards the special-token /
    embedding-tying options to the base config class.
    """

    model_type = """open-llama"""

    def __init__(
        self,
        vocab_size=10_00_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Historical checkpoints stored the key with this misspelling; honor it if passed.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: None, or {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        # The code below reads the `type` key, so the error message names `type` (not `name`).
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 420 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase__ = logging.get_logger(__name__)  # module logger

# On-disk filenames for the three Jukebox vocabulary files.
lowercase__ = {
    '''artists_file''': '''artists.json''',
    '''lyrics_file''': '''lyrics.json''',
    '''genres_file''': '''genres.json''',
}

# Remote vocabulary-file locations per pretrained checkpoint.
lowercase__ = {
    '''artists_file''': {
        '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
    },
    '''genres_file''': {
        '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
    },
    '''lyrics_file''': {
        '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
    },
}

# Maximum number of lyric tokens per checkpoint.
lowercase__ = {
    '''jukebox''': 5_12,
}
class UpperCAmelCase_ (PreTrainedTokenizer):
    """Tokenizer for Jukebox: maps (artist, genres, lyrics) triples to id lists.

    Artists and genres come from fixed JSON vocabularies; lyrics are tokenized
    character by character.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=5_12,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(R"\-'", R"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        """Total size of the three sub-vocabularies."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # Merge the three encoders into one token -> id mapping. NOTE: keys shared
        # between sub-vocabularies are overridden left-to-right.
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Map artist names, genre lists and lyric characters to their ids; pad genres with -1."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Lyrics are tokenized character by character."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Normalize then tokenize an (artist, genre, lyrics) triple."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        """Lower-case / normalize names per model version and strip out-of-vocab lyric characters."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strip accents (combining marks) from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text):
        """Keep only [a-zA-Z0-9.], replace everything else with '_' and squeeze runs of '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        """Convert `inputs` to the requested framework's tensor type (tf/pt/jax/np)."""
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        """Tokenize and encode one (artist, genres, lyrics) triple into a BatchEncoding."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the three encoder dictionaries as JSON files into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Inverse mapping: ids back to artist name, genre names and lyric characters."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 420 | 1 |
import baseaa
def _lowercase(string: str) -> bytes:
    """Encode `string` (UTF-8) with base32 and return the raw bytes.

    Bug fix: the body referenced `string`, which was not the parameter name,
    so every call raised NameError.
    """
    return baseaa.baaencode(string.encode("utf-8"))
def _lowercase(a__: bytes) -> str:
    """Decode the base32-encoded bytes `a__` and return the UTF-8 text."""
    decoded_bytes = baseaa.baadecode(a__)
    return decoded_bytes.decode("utf-8")
if __name__ == "__main__":
    # Demo round-trip: encode "Hello World!" and decode it again.
    # NOTE(review): `baseaa_encode` / `baseaa_decode` are not defined anywhere in
    # this module (both helpers above are named `_lowercase`), and the literal is
    # bound to `__lowerCAmelCase` while `test` / `encoded` / `decoded` are read —
    # as written this block raises NameError; confirm the intended names.
    __lowerCAmelCase = """Hello World!"""
    __lowerCAmelCase = baseaa_encode(test)
    print(encoded)
    __lowerCAmelCase = baseaa_decode(encoded)
    print(decoded)
| 147 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
__lowerCAmelCase = 8  # bits per channel (256 intensity levels); the helpers below default to the same value
def _lowercase ( a__ : Optional[Any] , a__ : Any=BITS ) -> Dict:
"""simple docstring"""
_UpperCamelCase = x.device
_UpperCamelCase = (x * 2_55).int().clamp(0 , 2_55 )
_UpperCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=a__ )
_UpperCamelCase = rearrange(a__ , "d -> d 1 1" )
_UpperCamelCase = rearrange(a__ , "b c h w -> b c 1 h w" )
_UpperCamelCase = ((x & mask) != 0).float()
_UpperCamelCase = rearrange(a__ , "b c d h w -> b (c d) h w" )
_UpperCamelCase = bits * 2 - 1
return bits
def _lowercase ( a__ : Optional[Any] , a__ : str=BITS ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = x.device
_UpperCamelCase = (x > 0).int()
_UpperCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=a__ , dtype=torch.intaa )
_UpperCamelCase = rearrange(a__ , "d -> d 1 1" )
_UpperCamelCase = rearrange(a__ , "b (c d) h w -> b c d h w" , d=8 )
_UpperCamelCase = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
return (dec / 2_55).clamp(0.0 , 1.0 )
def _lowercase ( self : Optional[Any] , a__ : torch.FloatTensor , a__ : int , a__ : torch.FloatTensor , a__ : float = 0.0 , a__ : bool = True , a__ : Any=None , a__ : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_UpperCamelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_UpperCamelCase = self.alphas_cumprod[timestep]
_UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_UpperCamelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_UpperCamelCase = self.bit_scale
if self.config.clip_sample:
_UpperCamelCase = torch.clamp(a__ , -scale , a__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_UpperCamelCase = self._get_variance(a__ , a__ )
_UpperCamelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_UpperCamelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_UpperCamelCase = model_output.device if torch.is_tensor(a__ ) else "cpu"
_UpperCamelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=a__ ).to(a__ )
_UpperCamelCase = self._get_variance(a__ , a__ ) ** 0.5 * eta * noise
_UpperCamelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=a__ , pred_original_sample=a__ )
def _lowercase ( self : str , a__ : torch.FloatTensor , a__ : int , a__ : torch.FloatTensor , a__ : int="epsilon" , a__ : int=None , a__ : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
_UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_UpperCamelCase , _UpperCamelCase = torch.split(a__ , sample.shape[1] , dim=1 )
else:
_UpperCamelCase = None
# 1. compute alphas, betas
_UpperCamelCase = self.alphas_cumprod[t]
_UpperCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_UpperCamelCase = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
_UpperCamelCase = self.bit_scale
if self.config.clip_sample:
_UpperCamelCase = torch.clamp(a__ , -scale , a__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_UpperCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_UpperCamelCase = 0
if t > 0:
_UpperCamelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=a__ ).to(model_output.device )
_UpperCamelCase = (self._get_variance(a__ , predicted_variance=a__ ) ** 0.5) * noise
_UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=a__ , pred_original_sample=a__ )
class lowerCamelCase_ (DiffusionPipeline):
    """Bit Diffusion pipeline: runs the UNet over a +/-1 bit latent representation.

    The base class is `DiffusionPipeline` (imported above; the original base name
    was undefined in this module).
    """

    def __init__(self, unet, scheduler, bit_scale: Optional[float] = 1.0):
        """Register sub-modules and monkey-patch the scheduler's `step` with the bit-aware variant."""
        super().__init__()
        self.bit_scale = bit_scale
        # NOTE(review): `ddim_bit_scheduler_step` / `ddpm_bit_scheduler_step` are
        # not defined under those names in this module (the step helpers above are
        # both named `_lowercase`) — confirm the intended references.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 2_56,
        width: Optional[int] = 2_56,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample `batch_size` images of `height` x `width` by denoising bit latents."""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator, )
        # NOTE(review): `decimal_to_bits` / `bits_to_decimal` are not defined under
        # those names in this module either — confirm.
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 147 | 1 |
from PIL import Image
def _UpperCAmelCase(image):
    """Binarize a grayscale PIL image around its mean intensity.

    Every pixel strictly above the mean becomes 255, the rest 0. The image is
    modified in place and also returned. Loop bounds restored (the original
    iterated `range(image)`, a TypeError).

    NOTE(review): PIL's `Image.size` is (width, height); this unpacking follows
    the original upstream code and is only consistent for square images —
    confirm for non-square inputs.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()

    # First pass: accumulate the global mean intensity.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    # Second pass: threshold every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Demo: binarize a grayscale image and save the result.
    # NOTE(review): `mean_threshold` is not defined under that name in this
    # module (the function above is `_UpperCAmelCase`), and the result is bound
    # to `_lowerCamelCase` while `image` is read on the next line — as written
    # this block raises NameError; confirm the intended names.
    _lowerCamelCase : int = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 196 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
    """Builds tiny ResNet configs and dummy inputs for the TF model tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Random pixel values (and optional labels) plus a small config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class __snake_case (TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common TF model test-suite against the ResNet models.

    Base mixins restored from the module imports (the original bases `_a, _a`
    were undefined).
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        # NOTE(review): `TFResNetModelTester` matches the original reference, but
        # the tester class in this module is named `__snake_case` — confirm.
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _UpperCAmelCase():
    """Load the fixture image used by the slow integration test.

    Bug fix: the original assigned the image to a throwaway local but then
    executed `return image`, raising NameError.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class __snake_case (unittest.TestCase):
    """Slow integration test: run a pretrained TF ResNet on a real image."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        # NOTE(review): `prepare_img` matches the original reference, but the
        # loader above is named `_UpperCAmelCase` — confirm.
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 196 | 1 |
# Notebook bootstrap cell (text is in Korean): installs transformers + datasets,
# with a commented-out from-source install alternative. This is a runtime string
# consumed by the docs notebook converter — keep its contents verbatim.
__lowerCamelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cell injected into every generated notebook.
# NOTE(review): it references `INSTALL_CONTENT`, but the string above is bound
# to `__lowerCamelCase` — confirm the intended constant name.
__lowerCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder substitutions applied to doc examples before execution.
__lowerCamelCase = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 204 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def UpperCamelCase(height: int, width: int, scale_factor: int = 8):
    """Compute the decoder-aligned output size for a requested (height, width).

    Each dimension becomes ceil(dim / scale_factor**2) * scale_factor. Parameter
    names restored from the body (the original signature repeated one name
    three times, a SyntaxError).
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class UpperCAmelCase ( DiffusionPipeline ):
    """Kandinsky 2.2 decoder pipeline.

    Turns CLIP image embeddings produced by the prior pipeline into images.
    (Base class restored to the imported `DiffusionPipeline`; the obfuscated
    original referenced an undefined name.)

    Args:
        unet: conditional U-Net that denoises the image latents.
        scheduler: DDPM scheduler used together with `unet`.
        movq: MoVQ encoder/decoder used to decode latents to pixel space.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # Spatial down-scaling factor of the MoVQ latent space
        # (original assigned this to a throwaway local instead of `self`).
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw initial latents (or validate user-provided ones) and scale them
        by the scheduler's initial noise sigma.

        (Restored name: `__call__` invokes `self.prepare_latents`, but in the
        obfuscated original all four methods shared one mangled name.)
        """
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        return latents * scheduler.init_noise_sigma

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models with accelerate's `cpu_offload`: each submodule is
        moved to GPU only while its forward runs (lowest memory, slower)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving each to GPU when first used and
        back when the next model runs (compromise between speed and memory)."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the models execute on, accounting for accelerate offload hooks
        (restored name: `__call__` reads `self._execution_device`)."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 5_12,
        width: int = 5_12,
        num_inference_steps: int = 1_00,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Decode CLIP image embeddings into images.

        Returns:
            `ImagePipelineOutput` (or a plain tuple when `return_dict=False`).
        """
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional embeddings first so `chunk(2)` below yields (uncond, text).
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                # Conditioning enters via `added_cond_kwargs`, not text hidden states.
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The U-Net predicts noise and (learned) variance stacked on dim 1.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the variance channels; drop them.
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing: decode latents to pixels with MoVQ
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5  # [-1, 1] -> [0, 1]
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 204 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
# Module-level logger. The class below calls `logger.warning(...)`, but the
# obfuscated original bound the logger only to a mangled name -> NameError.
# Keep the mangled name as a backward-compatible alias.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = logger
class A__ ( DiffusionPipeline ):
    """Speech-to-image pipeline: transcribes audio with Whisper, then renders the
    transcription with Stable Diffusion.

    (Base class restored to the imported `DiffusionPipeline`; the obfuscated
    original referenced an undefined name, and every method signature repeated
    one mangled parameter name — a SyntaxError. Parameter names below are
    restored from how the bodies actually use them.)
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )

        # NOTE: `safety_checker` is only warned about, not registered — matches
        # the original `register_modules` call, which omits it.
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to reduce peak memory.

        "auto" halves the attention head count as the slice size.
        """
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        # Passing `None` turns slicing off.
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=1_60_00,
        height: int = 5_12,
        width: int = 5_12,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Transcribe `audio` with Whisper and run Stable Diffusion on the text.

        Returns `StableDiffusionPipelineOutput` (or the raw image array when
        `return_dict=False`).
        """
        # 1. Speech -> text prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs, max_length=48_00_00 )

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True )[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""" )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="""
                    f""" {type(prompt)}.""" )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t )

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings ).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents )

        # VAE scaling constant for Stable Diffusion latents.
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample

        image = (image / 2 + 0.5).clamp(0, 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1 ).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return image

        # No safety checker is run in this pipeline, so nsfw flags are unknown.
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None )
| 713 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( BaseTokenizer ):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    Special tokens are pinned to fixed ids (pad=0, eos=1, unk=2); normalization
    lowercases, NFKC-normalizes and collapses runs of spaces; pre-tokenization
    splits on Metaspace, individual digits and punctuation.

    (Base class restored to the imported `BaseTokenizer`; the obfuscated
    original referenced an undefined name, repeated one mangled parameter name
    per signature — a SyntaxError — and named all three methods identically so
    they shadowed each other while the bodies call `self.add_unk_id()`.)
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }

        # Flat list indexed by token id, handed to the trainer.
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram() )

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ), ' ' ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space )

        # Append EOS to every encoded sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens["eos"]["token"]}""", special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )

        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }

        super().__init__(tokenizer, parameters )

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 80_00,
        show_progress: bool = True,
    ):
        """Train the Unigram model from one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer )

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 80_00,
        show_progress: bool = True,
    ):
        """Train the Unigram model from an iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer )

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained model's JSON so the unk token id is registered.

        (The obfuscated original computed each step into a throwaway local and
        never wrote `unk_id` back nor replaced `self._tokenizer`.)
        """
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
| 688 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __a ( Trainer ):
    """`Trainer` subclass for extractive question answering.

    QA metrics (exact match / F1) need the raw examples plus a post-processing
    step that maps start/end logits back to answer spans, so `evaluate` and
    `predict` temporarily disable `compute_metrics` during the prediction loop
    and apply `post_process_function` afterwards.

    (Fixes vs. the obfuscated original: duplicated mangled parameter names were
    a SyntaxError; `__init__` discarded its extras instead of storing them on
    `self`; loop outputs/metrics were bound to throwaway locals while later
    lines read `output`, `metrics` and `predictions`.)
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs )
        # Raw (un-tokenized) examples and the logits->answers converter used by
        # evaluate()/predict().
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run evaluation, post-process predictions into answers and compute QA metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            # Always restore, even if the loop raises.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions )
            metrics = self.compute_metrics(eval_preds )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'''{metric_key_prefix}_''' ):
                    metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics )
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction on a test set and return a `PredictionOutput` with QA metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset )

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict' )
        metrics = self.compute_metrics(predictions )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'''{metric_key_prefix}_''' ):
                metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics )
| 618 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __a ( unittest.TestCase ):
_a : int = MODEL_FOR_MASKED_LM_MAPPING
_a : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
_UpperCAmelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-0_5, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-0_5, 'token': 25506, 'token_str': ' accuser'},
] , )
_UpperCAmelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-0_5,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-0_5,
'token': 25506,
'token_str': ' accuser',
},
] , )
_UpperCAmelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-0_5, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
_UpperCAmelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-0_5, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS'},
] , )
_UpperCAmelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS'},
] , )
_UpperCAmelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-0_5, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 13606, 'token_str': ' Clara'},
] , )
_UpperCAmelCase = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
[
{
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_SCREAMING_SNAKE_CASE )
@slow
@require_tf
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
] , )
_UpperCAmelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 12790,
'token_str': ' Lyon',
},
] , )
_UpperCAmelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(_SCREAMING_SNAKE_CASE , [] )
@require_tf
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(_SCREAMING_SNAKE_CASE , [] )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
_UpperCAmelCase = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
_UpperCAmelCase = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
fill_masker('This is' )
self.run_test_top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.run_test_targets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.run_test_top_k_targets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.fill_mask_with_duplicate_targets_and_top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.fill_mask_with_multiple_masks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , targets=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_SCREAMING_SNAKE_CASE ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_SCREAMING_SNAKE_CASE ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [top_mask['token_str'] for top_mask in outputs]
_UpperCAmelCase = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_SCREAMING_SNAKE_CASE ) == set(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
# Raises with invalid
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[''] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets='' )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , top_k=2 )
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_SCREAMING_SNAKE_CASE )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el['token_str'] for el in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x["score"] , reverse=_SCREAMING_SNAKE_CASE )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_SCREAMING_SNAKE_CASE ).issubset(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_SCREAMING_SNAKE_CASE )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=_SCREAMING_SNAKE_CASE , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 3 )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
| 618 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
a_ = 3
def primitive_root(p: int) -> int:
    """Return a random candidate primitive root modulo the prime *p*.

    Rejects candidates g with g^2 == 1 (mod p) or g^p == 1 (mod p); this is the
    cheap screen used by the original algorithm, not a full primitive-root proof.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p)
        if pow(g, 2, p) == 1:
            continue
        if pow(g, p, p) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of *key_size* bits.

    Returns (public_key, private_key) where public_key = (key_size, g, g^-d, p)
    and private_key = (key_size, d).
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a = (e_a, cryptomath.find_mod_inverse(pow(e_a, d, p), p))

    public_key = (key_size, e_a[0], e_a[1], p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write `<name>_pubkey.txt` / `<name>_privkey.txt`; abort instead of overwriting."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    """Entry point: generate a 2048-bit ElGamal key pair named 'elgamal'."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 714 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
# Checkpoints exercised by the tests below; the tiny model backs the Keras save/load test.
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal Keras model pairing an in-graph tokenizer with a tiny BERT backbone."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            # Tokenize in-graph, then feed the encoded batch straight into the model.
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    """Check that TFBertTokenizer matches the Python BertTokenizer and survives tf.function/saving."""

    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 92 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase_(TaskTemplate):
    """Task template for extractive question answering (SQuAD-style)."""

    # `task` is serialized even when it keeps its default value.
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's configured column names onto the canonical schema names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
'''simple docstring'''
import re
def __lowerCamelCase ( __lowerCAmelCase : str ) -> list:
return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def __lowerCamelCase ( __lowerCAmelCase : str ) -> str:
snake_case = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : bool , __lowerCAmelCase : str ) -> str:
try:
snake_case = split_input(__lowerCAmelCase )
if upper:
snake_case = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
snake_case = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __lowerCamelCase ( __lowerCAmelCase : str ) -> str:
return to_simple_case(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : str ) -> str:
try:
snake_case = to_simple_case(__lowerCAmelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : bool ) -> str:
return to_complex_case(__lowerCAmelCase , __lowerCAmelCase , """_""" )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : bool ) -> str:
return to_complex_case(__lowerCAmelCase , __lowerCAmelCase , """-""" )
if __name__ == "__main__":
__import__("doctest").testmod()
| 369 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase__(unittest.TestCase):
    """Tests for CLIPSegProcessor: tokenizer + image processor save/load and call paths."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 582 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    """Unit tests for GenerationConfig save/load, model-config import, and kwarg handling."""

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class lowercase__( unittest.TestCase ):
    """Staging-hub integration tests: push a GenerationConfig to a user repo and to an
    org repo, both via `push_to_hub` and via `save_pretrained(..., push_to_hub=True)`,
    and verify the reloaded config matches field-for-field."""

    # NOTE(review): the original class bodies bound every value to placeholder names and
    # then read `cls._token`, `new_config`, `tmp_dir`, `v`, etc., which were never
    # defined — every method would raise NameError. Names are restored consistently;
    # public method/class names are kept unchanged.

    @classmethod
    def UpperCAmelCase ( cls) -> List[Any]:
        """Store the shared auth token and persist it so Hub calls are authenticated."""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def UpperCAmelCase ( cls) -> Tuple:
        """Best-effort cleanup of the repos this class may have created."""
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def UpperCAmelCase ( self) -> str:
        """Round-trip a config through a user repo via both push mechanisms."""
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''')
        for k, v in config.to_dict().items():
            # transformers_version differs between save and load environments
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def UpperCAmelCase ( self) -> List[Any]:
        """Round-trip a config through an organization repo via both push mechanisms."""
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 582 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __magic_name__ ( __a ):
"""simple docstring"""
lowerCAmelCase : List[Any] = '''realm'''
def __init__( self : str , _lowercase : int=30_522 , _lowercase : Optional[int]=768 , _lowercase : Dict=128 , _lowercase : List[Any]=12 , _lowercase : Any=12 , _lowercase : int=8 , _lowercase : Optional[int]=3_072 , _lowercase : List[str]="gelu_new" , _lowercase : List[str]=0.1 , _lowercase : Any=0.1 , _lowercase : List[str]=512 , _lowercase : Union[str, Any]=2 , _lowercase : List[Any]=0.02 , _lowercase : str=1E-12 , _lowercase : Union[str, Any]=256 , _lowercase : Optional[Any]=10 , _lowercase : Optional[Any]=1E-3 , _lowercase : List[str]=5 , _lowercase : str=320 , _lowercase : Dict=13_353_718 , _lowercase : int=5_000 , _lowercase : Optional[Any]=1 , _lowercase : Union[str, Any]=0 , _lowercase : Optional[Any]=2 , **_lowercase : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
# Common config
_UpperCamelCase: int = vocab_size
_UpperCamelCase: Tuple = max_position_embeddings
_UpperCamelCase: Tuple = hidden_size
_UpperCamelCase: int = retriever_proj_size
_UpperCamelCase: List[Any] = num_hidden_layers
_UpperCamelCase: str = num_attention_heads
_UpperCamelCase: Optional[int] = num_candidates
_UpperCamelCase: Union[str, Any] = intermediate_size
_UpperCamelCase: Any = hidden_act
_UpperCamelCase: int = hidden_dropout_prob
_UpperCamelCase: str = attention_probs_dropout_prob
_UpperCamelCase: Optional[Any] = initializer_range
_UpperCamelCase: str = type_vocab_size
_UpperCamelCase: Optional[int] = layer_norm_eps
# Reader config
_UpperCamelCase: Tuple = span_hidden_size
_UpperCamelCase: Union[str, Any] = max_span_width
_UpperCamelCase: int = reader_layer_norm_eps
_UpperCamelCase: Dict = reader_beam_size
_UpperCamelCase: int = reader_seq_len
# Retrieval config
_UpperCamelCase: Optional[Any] = num_block_records
_UpperCamelCase: Dict = searcher_beam_size | 271 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( lowercase: str , lowercase: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase: int = b.T
_UpperCamelCase: Optional[Any] = np.sum(np.square(lowercase ) , axis=1 )
_UpperCamelCase: List[Any] = np.sum(np.square(lowercase ) , axis=0 )
_UpperCamelCase: Tuple = np.matmul(lowercase , lowercase )
_UpperCamelCase: Tuple = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowerCAmelCase_ ( lowercase: List[str] , lowercase: List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase: Tuple = x.reshape(-1 , 3 )
_UpperCamelCase: Dict = squared_euclidean_distance(lowercase , lowercase )
return np.argmin(lowercase , axis=1 )
class __magic_name__ ( __a ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = ['''pixel_values''']
def __init__( self : List[str] , _lowercase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : bool = True , **_lowercase : Tuple , ):
"""simple docstring"""
super().__init__(**_lowercase )
_UpperCamelCase: Tuple = size if size is not None else {'''height''': 256, '''width''': 256}
_UpperCamelCase: List[str] = get_size_dict(_lowercase )
_UpperCamelCase: Dict = np.array(_lowercase ) if clusters is not None else None
_UpperCamelCase: Optional[Any] = do_resize
_UpperCamelCase: Union[str, Any] = size
_UpperCamelCase: Optional[int] = resample
_UpperCamelCase: Any = do_normalize
_UpperCamelCase: int = do_color_quantize
def lowerCAmelCase ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
"""simple docstring"""
_UpperCamelCase: List[str] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
_lowercase , size=(size['''height'''], size['''width''']) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowerCAmelCase ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Optional[Union[str, ChannelDimension]] = None , ):
"""simple docstring"""
_UpperCamelCase: Any = rescale(image=_lowercase , scale=1 / 127.5 , data_format=_lowercase )
_UpperCamelCase: List[Any] = image - 1
return image
def lowerCAmelCase ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **_lowercase : List[str] , ):
"""simple docstring"""
_UpperCamelCase: Any = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase: Optional[Any] = size if size is not None else self.size
_UpperCamelCase: Optional[Any] = get_size_dict(_lowercase )
_UpperCamelCase: Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase: Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase: Any = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_UpperCamelCase: Dict = clusters if clusters is not None else self.clusters
_UpperCamelCase: Optional[Any] = np.array(_lowercase )
_UpperCamelCase: List[str] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase: Optional[int] = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
_UpperCamelCase: int = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_normalize:
_UpperCamelCase: Any = [self.normalize(image=_lowercase ) for image in images]
if do_color_quantize:
_UpperCamelCase: Tuple = [to_channel_dimension_format(_lowercase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_UpperCamelCase: Tuple = np.array(_lowercase )
_UpperCamelCase: List[Any] = color_quantize(_lowercase , _lowercase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_UpperCamelCase: Any = images.shape[0]
_UpperCamelCase: Optional[int] = images.reshape(_lowercase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_UpperCamelCase: Tuple = list(_lowercase )
else:
_UpperCamelCase: List[str] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
_UpperCamelCase: List[Any] = {'''input_ids''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase ) | 271 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_A = logging.getLogger()
_A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
def A ( self : List[Any] , A_ : Any )-> Any:
os.makedirs(A_ , exist_ok=A_ )
__UpperCamelCase = {"source": "What is love ?", "target": "life"}
__UpperCamelCase = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__UpperCamelCase = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(A_ , f"""{split}.{field}""" ) , "w" ) as f:
f.write(A_ )
def A ( self : str , A_ : int , A_ : str = "pytorch" )-> str:
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = os.path.join(A_ , "output" )
__UpperCamelCase = os.path.join(A_ , "data" )
self._create_dummy_data(data_dir=A_ )
__UpperCamelCase = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
__UpperCamelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(A_ , env=self.get_env() )
__UpperCamelCase = os.path.join(A_ , "metrics.json" )
with open(A_ ) as f:
__UpperCamelCase = json.load(A_ )
return result
@require_torch_gpu
def A ( self : Optional[int] )-> Dict:
__UpperCamelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def A ( self : str )-> Optional[int]:
__UpperCamelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def A ( self : int )-> List[str]:
__UpperCamelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def A ( self : List[Any] )-> Any:
__UpperCamelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) | 228 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : torch.FloatTensor
class __UpperCAmelCase ( snake_case__ , snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , A_ : int = 32 , A_ : int = 64 , A_ : int = 20 , A_ : int = 7_68 , A_ : Dict=77 , A_ : Union[str, Any]=4 , A_ : float = 0.0 , A_ : str = "silu" , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[str] = "linear" , A_ : Optional[str] = "prd" , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : Optional[int] = None , )-> Optional[int]:
super().__init__()
__UpperCamelCase = num_attention_heads
__UpperCamelCase = attention_head_dim
__UpperCamelCase = num_attention_heads * attention_head_dim
__UpperCamelCase = additional_embeddings
__UpperCamelCase = time_embed_dim or inner_dim
__UpperCamelCase = embedding_proj_dim or embedding_dim
__UpperCamelCase = clip_embed_dim or embedding_dim
__UpperCamelCase = Timesteps(A_ , A_ , 0 )
__UpperCamelCase = TimestepEmbedding(A_ , A_ , out_dim=A_ , act_fn=A_ )
__UpperCamelCase = nn.Linear(A_ , A_ )
if embedding_proj_norm_type is None:
__UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
__UpperCamelCase = nn.LayerNorm(A_ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__UpperCamelCase = nn.Linear(A_ , A_ )
if encoder_hid_proj_type is None:
__UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
__UpperCamelCase = nn.Linear(A_ , A_ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , A_ ) )
if added_emb_type == "prd":
__UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , A_ ) )
elif added_emb_type is None:
__UpperCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
A_ , A_ , A_ , dropout=A_ , activation_fn="gelu" , attention_bias=A_ , )
for d in range(A_ )
] )
if norm_in_type == "layer":
__UpperCamelCase = nn.LayerNorm(A_ )
elif norm_in_type is None:
__UpperCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__UpperCamelCase = nn.LayerNorm(A_ )
__UpperCamelCase = nn.Linear(A_ , A_ )
__UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , A_ , persistent=A_ )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A ( self : Tuple )-> Dict[str, AttentionProcessor]:
__UpperCamelCase = {}
def fn_recursive_add_processors(A_ : str , A_ : torch.nn.Module , A_ : Dict[str, AttentionProcessor] ):
if hasattr(A_ , "set_processor" ):
__UpperCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , A_ , A_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A_ , A_ , A_ )
return processors
def A ( self : Tuple , A_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] )-> Optional[int]:
__UpperCamelCase = len(self.attn_processors.keys() )
if isinstance(A_ , A_ ) and len(A_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(A_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(A_ : str , A_ : torch.nn.Module , A_ : Any ):
if hasattr(A_ , "set_processor" ):
if not isinstance(A_ , A_ ):
module.set_processor(A_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , A_ , A_ )
for name, module in self.named_children():
fn_recursive_attn_processor(A_ , A_ , A_ )
def A ( self : List[str] )-> List[str]:
self.set_attn_processor(AttnProcessor() )
def A ( self : Dict , A_ : str , A_ : Union[torch.Tensor, float, int] , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.BoolTensor] = None , A_ : bool = True , )-> Any:
__UpperCamelCase = hidden_states.shape[0]
__UpperCamelCase = timestep
if not torch.is_tensor(A_ ):
__UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
__UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase = timesteps * torch.ones(A_ , dtype=timesteps.dtype , device=timesteps.device )
__UpperCamelCase = self.time_proj(A_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
__UpperCamelCase = self.time_embedding(A_ )
if self.embedding_proj_norm is not None:
__UpperCamelCase = self.embedding_proj_norm(A_ )
__UpperCamelCase = self.embedding_proj(A_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCamelCase = self.encoder_hidden_states_proj(A_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
__UpperCamelCase = self.proj_in(A_ )
__UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
__UpperCamelCase = []
__UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(A_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCamelCase = hidden_states[:, None, :]
__UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(A_ , -1 , -1 )
additional_embeds.append(A_ )
__UpperCamelCase = torch.cat(
A_ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCamelCase = F.pad(
A_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCamelCase = F.pad(A_ , (0, self.additional_embeddings) , value=0.0 )
__UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCamelCase = self.norm_in(A_ )
for block in self.transformer_blocks:
__UpperCamelCase = block(A_ , attention_mask=A_ )
__UpperCamelCase = self.norm_out(A_ )
if self.prd_embedding is not None:
__UpperCamelCase = hidden_states[:, -1]
else:
__UpperCamelCase = hidden_states[:, additional_embeddings_len:]
__UpperCamelCase = self.proj_to_clip_embeddings(A_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=A_ )
def A ( self : Dict , A_ : Tuple )-> Dict:
__UpperCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents | 228 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
__a: Optional[int] = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
__a: Tuple = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Optional[int]:
_UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_UpperCAmelCase = numpy_to_pil(__snake_case )
return images
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Optional[Any]:
if images.ndim == 3:
_UpperCAmelCase = images[None, ...]
_UpperCAmelCase = (images * 2_5_5).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_UpperCAmelCase = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
_UpperCAmelCase = [Image.fromarray(__snake_case ) for image in images]
return pil_images | 108 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : Dict=13, UpperCamelCase__ : Optional[Any]=7, UpperCamelCase__ : List[str]=True, UpperCamelCase__ : Union[str, Any]=True, UpperCamelCase__ : Optional[int]=True, UpperCamelCase__ : Optional[Any]=True, UpperCamelCase__ : Dict=99, UpperCamelCase__ : Dict=32, UpperCamelCase__ : Any=2, UpperCamelCase__ : Optional[int]=4, UpperCamelCase__ : Tuple=37, UpperCamelCase__ : Union[str, Any]="gelu", UpperCamelCase__ : Optional[Any]=0.1, UpperCamelCase__ : Any=0.1, UpperCamelCase__ : Union[str, Any]=5_12, UpperCamelCase__ : Optional[Any]=16, UpperCamelCase__ : List[str]=2, UpperCamelCase__ : List[Any]=0.02, UpperCamelCase__ : List[str]=3, UpperCamelCase__ : Optional[Any]=4, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Union[str, Any]=0, ) -> str:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = projection_dim
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
_A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_A = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size], self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_A = ids_tensor([self.batch_size], self.num_choices )
_A = BertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
_A = DPRConfig(projection_dim=self.projection_dim, **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any] ) -> int:
_A = TFDPRContextEncoder(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : str, UpperCamelCase__ : str ) -> int:
_A = TFDPRQuestionEncoder(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int] ) -> Any:
_A = TFDPRReader(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,) )
def __UpperCAmelCase ( self : Dict ) -> Dict:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common TF DPR model tests.

    The obfuscated original bound every class attribute and every method to the
    same name, so only the last definition of each survived; `unittest` also
    never discovered the tests because none were named `test*`. Names are
    restored to the mixin-expected spellings.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        # NOTE(review): DPRConfig assumed to be imported in the (not visible) file
        # header — the original passed an undefined name here; verify.
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # The original iterated the context-encoder list twice; deduplicated.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Integration test comparing TFDPRQuestionEncoder embeddings to reference values.

    Original locals all collapsed into one name, so `output` and `expected_slice`
    were undefined at the assertion; the method also was not named `test*` and
    would never run.
    """

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 107 | 0 |
import unittest
from knapsack import knapsack as k
class A_ ( unittest.TestCase ):
    """Unit tests for ``knapsack.knapsack``.

    The obfuscated original gave all three methods the same name (so two were
    shadowed and dead) and referenced an undefined ``_A`` where the locals
    belonged; names are restored so ``unittest`` discovers them as tests.
    """

    def test_base_case(self):
        # Zero capacity / zero-value item: expected profit is 0.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        # Positive-value item that does not fit in a zero-capacity sack.
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 186 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# The original bound all three strings to one name, so the citation and
# description were silently lost; the metric class below references
# _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION, restored here.
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_ ( datasets.Metric ):
    """Accuracy metric for the MATH dataset: canonicalizes LaTeX answers, then scores.

    ``datasets.Metric`` dispatches to the hook names ``_info`` and ``_compute``;
    the obfuscated original collapsed both methods into one name (and repeated
    a parameter name, a SyntaxError), so the metric could never run.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string'''),
                    '''references''': datasets.Value('''string'''),
                }
            ),
            homepage='''https://github.com/hendrycks/math''',
            codebase_urls=['''https://github.com/hendrycks/math'''],
        )

    def _compute(self, predictions, references):
        # Fraction of predictions judged equivalent to the reference after
        # canonicalization (e.g. "1/2" vs "\frac{1}{2}").
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 186 | 1 |
'''simple docstring'''
from string import ascii_uppercase
# Forward table: letter -> index ('A' -> 0 ... 'Z' -> 25).
UpperCAmelCase_ = {char: i for i, char in enumerate(ascii_uppercase)}
# NOTE(review): this rebinds the SAME name, discarding the table above; the
# cipher helpers need BOTH a char->index and an index->char table under
# distinct names — verify intended names against the original source.
UpperCAmelCase_ = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Extend *key* with its own characters until it is as long as *message*.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'

    The obfuscated original declared both parameters under one name, which is
    a SyntaxError; the name ``generate_key`` matches the call in ``main``.
    """
    target_len = len(message)
    i = 0
    while True:
        # Wrap the cycling index once it reaches the message length.
        if target_len == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt *message* with *key_new*: each letter becomes (msg - key) mod 26.

    Spaces are copied through and do not consume a key character. The lookup
    tables are built locally because the module-level tables were both bound
    to one name (shadowed); parameters were also duplicated (SyntaxError).
    """
    char_to_idx = {char: i for i, char in enumerate(ascii_uppercase)}
    idx_to_char = dict(enumerate(ascii_uppercase))
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (char_to_idx[letter] - char_to_idx[key_new[i]]) % 26
            i += 1
            encrypted += idx_to_char[x]
    return encrypted
def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt *cipher_text* with *key_new*: each letter becomes (cipher + key) mod 26.

    Inverse of ``cipher_text``; spaces pass through without consuming a key
    character. Tables are built locally (the module-level tables shadow each
    other); duplicated parameter names (SyntaxError) are fixed.
    """
    char_to_idx = {char: i for i, char in enumerate(ascii_uppercase)}
    idx_to_char = dict(enumerate(ascii_uppercase))
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_idx[letter] + char_to_idx[key_new[i]] + 26) % 26
            i += 1
            or_txt += idx_to_char[x]
    return or_txt
def main() -> None:
    """Demo: encrypt and decrypt a fixed message, printing both results.

    The obfuscated original collapsed all locals into one name, so the printed
    ``s`` was undefined; it already called ``generate_key``/``cipher_text``/
    ``original_text``, which fixes the NameErrors once those defs carry their
    real names.
    """
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(F'''Encrypted Text = {s}''')
    print(F'''Original Text = {original_text(s, key_new)}''')
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): requires a module-level `main` under exactly this name —
    # the obfuscated defs above were all renamed away from it; verify.
    main()
| 603 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffolding: the original bound the structure dict and the
# tokenizer list to one throwaway name, then referenced an undefined
# `_import_structure` and discarded the `_LazyModule` instead of installing
# it into `sys.modules`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# The original rebound the logger's name to this dict, so the config class's
# `logger.info(...)` call below would hit an AttributeError on a dict.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class lowerCAmelCase_ ( __lowercase ):
    """Configuration for the Conditional DETR model.

    The obfuscated original repeated `_A` for every `__init__` parameter (a
    SyntaxError), collapsed the `model_type` / `keys_to_ignore_at_inference` /
    `attribute_map` class attributes into one name, and in `to_dict` assigned
    to locals instead of dict entries. Restored to the PretrainedConfig
    contract; defaults are unchanged.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        """Serialize, expanding the nested backbone config and stamping model_type."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class lowerCAmelCase_ ( __lowercase ):
    """ONNX export configuration for Conditional DETR.

    The obfuscated original collapsed all OnnxConfig hook names into a single
    identifier; restored to the names the export machinery dispatches on.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Tolerance used when validating ONNX outputs against the PyTorch model.
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import scaffolding: the original bound everything to one throwaway
# name, referenced an undefined `_import_structure`, and discarded the
# `_LazyModule` instead of installing it into `sys.modules`.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 1 |
import requests
from bsa import BeautifulSoup
def world_covidaa_stats(UpperCamelCase__: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return {statistic title: value} for COVID-19.

    The obfuscated original reassigned a single local where `keys` and `values`
    belonged, so the `+=` lines hit a NameError; the function is renamed to the
    name the `__main__` guard below actually calls.
    """
    soup = BeautifulSoup(requests.get(UpperCamelCase__).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    # (Removed stray "| 6 |" marker that was fused onto the last line — it was
    # a syntax error.)
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covidaa_stats().items():
        print(F'''{key}\n{value}\n''')
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Language-name -> FLORES-200 code table. The original bound it to a throwaway
# name, but the TranslationTool class below references `LANGUAGE_CODES`.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class A ( PipelineTool ):
    """NLLB translation tool.

    The obfuscated original inherited from an undefined name (`PipelineTool`
    is imported above), bound all eight Tool attributes to one name, collapsed
    `encode`/`forward`/`decode` into one method name (with a triplicated
    parameter — a SyntaxError), and passed the raw outputs object as
    `skip_special_tokens`.
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Validate languages, map them to FLORES codes, and tokenize."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 48 | 0 |
def __magic_name__(lowercase: str) -> str:
    """Return the input string with its whitespace-separated words reversed.

    The original referenced an undefined `input_str`; the parameter is named
    `lowercase`, which is what the body must use.
    """
    return " ".join(lowercase.split()[::-1])
if __name__ == "__main__":
    # (Removed stray "| 436 |" marker fused onto the last line — a syntax error.)
    import doctest

    doctest.testmod()
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# The original rebound the logger's name to this dict, clobbering it.
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCamelCase__ ):
    """MVP model configuration.

    The obfuscated original repeated `snake_case__` for every `__init__`
    parameter (a SyntaxError) and collapsed the `model_type` /
    `keys_to_ignore_at_inference` / `attribute_map` class attributes into one
    name. Defaults are unchanged. (Removed a stray "| 436 | 1 |" marker fused
    onto the final line.)
    """

    model_type = """mvp"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                """The config can simply be saved and uploaded again to be fixed."""
            )
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download and return the standard LAVIS demo image as an RGB PIL image.

    The obfuscated original referenced an undefined name where `url` and
    `stream=True` belonged; this and the sibling helpers were all named `a`,
    so every def but the last was shadowed — restored to distinct names.
    """
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping LAVIS weights to HF InstructBLIP names.

    The original's body referenced `config` but the parameter was named
    differently (a NameError); the name matches the call site in the
    conversion routine below.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
    rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
    rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
    rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
    rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
    rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',))
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias'''))

    # QFormer
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight'))
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias'))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    The original declared all three parameters under one name (a SyntaxError);
    the function name matches the call site in the conversion routine below.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Merge the separate q/v attention biases into a single qkv bias per layer.

    The k bias is zero (not learned in the original checkpoint), so the merged
    vector is [q_bias, zeros, v_bias]. The original declared both parameters
    under one name (a SyntaxError).
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name):
    """Build an (InstructBlipConfig, image_size) pair for the given model name.

    The obfuscated original referenced an undefined `model_name`, collapsed all
    locals into one name, and never fed the computed image size into the vision
    config; the function name matches the call in the conversion routine below.
    """
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict()
    else:
        raise ValueError('Model name not supported')

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Convert an original LAVIS InstructBLIP checkpoint to the HF
    InstructBlipForConditionalGeneration layout, verify that pixel values,
    logits and generation match, and optionally save / push the result.
    """
    # Q-Former tokenizer is always BERT-based, with one extra "[DEC]" token.
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    # keep the two models on different devices so both fit on a multi-GPU box
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    # NOTE(review): OPENAI_CLIP_MEAN/STD come from transformers.utils.constants —
    # confirm they are imported at the top of this file.
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    # CLI entry point: pick a supported InstructBLIP variant and convert it.
    import argparse  # NOTE(review): confirm argparse is not already imported at file top

    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 697 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Compute the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Args:
        set_a: a ``set``, ``list`` or ``tuple``.
        set_b: a collection of the same family as ``set_a``.
        alternative_union: if True, use len(A) + len(B) as the denominator
            instead of the size of the true union.

    Returns:
        The similarity as a float, or ``None`` when the inputs are not both
        sets or both list/tuple sequences.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            # order-preserving union of the two sequences
            union = set_a + [element for element in set_b if element not in set_a]
            union = len(union)
        return len(intersection) / union

    return None
if __name__ == "__main__":
    # Demo: 3 shared elements out of an 8-element union -> 0.375
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 697 | 1 |
import os
import sys
import unittest

# Repository root, derived from this test file's location (three levels up).
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

# Templates mirroring what check_dummies emits for a dummy constant/class/function.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class _A(unittest.TestCase):
    """Unit tests for the `check_dummies` utility script."""

    def test_find_backend(self):
        # a regular import line maps to no backend
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend(" if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            'def function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\n'
            'class FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n'
            '    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 77 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    """Builds tiny random Llama configs/inputs and runs shape and consistency checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline/generation tests for the Llama architecture."""

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing real Llama-2 checkpoints against recorded logits."""

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        # NOTE(review): the original compares the mean here again (EXPECTED_SLICE is
        # unused); preserved as-is since the test is skipped — confirm intent.
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is curently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 77 | 1 |
from collections import defaultdict
from math import gcd
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int = 1_50_00_00 ):
"""simple docstring"""
a_ : defaultdict = defaultdict(SCREAMING_SNAKE_CASE_ )
a_ : Optional[Any] = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , SCREAMING_SNAKE_CASE_ , 2 ):
if gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) > 1:
continue
a_ : Union[str, Any] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(SCREAMING_SNAKE_CASE_ , limit + 1 , SCREAMING_SNAKE_CASE_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
    # Prints the Project Euler #75 answer for the default limit.
    # NOTE(review): the call site expects a function named `solution`; the def in
    # this file is currently named `_lowerCamelCase` — confirm the binding.
    print(F"""{solution() = }""")
| 419 |
from ..utils import DummyObject, requires_backends


# Auto-generated placeholder objects exposed when `sentencepiece` is not
# installed: any instantiation reports the missing backend via
# `requires_backends` instead of failing with an ImportError at import time.
# NOTE(review): the obfuscated original named every class `snake_case__` with an
# undefined metaclass `__A`; the metaclass is restored to the imported
# `DummyObject` and the backend list to the `_backends` attribute DummyObject
# reads. The distinct original class names are unrecoverable from this chunk,
# so only the last definition survives at module level — confirm upstream.
class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class snake_case__ ( metaclass=__A ):
UpperCAmelCase : int = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=__A ):
UpperCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
| 419 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Tables mapping original Stable Diffusion UNet parameter names to the
# HF Diffusers names, consumed by the converter function below.
# NOTE(review): inside the loops every prefix is bound to the throwaway name
# `a_`, yet the `append` calls read `sd_down_res_prefix` etc., and the list
# itself is bound to `a_` while `unet_conversion_map_layer` is appended to —
# the real assignment targets were lost upstream and these loops raise
# NameError as written.
a_ = [
    # (stable-diffusion, HF Diffusers)
    ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
    ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
    ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
    ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
    ('''input_blocks.0.0.weight''', '''conv_in.weight'''),
    ('''input_blocks.0.0.bias''', '''conv_in.bias'''),
    ('''out.0.weight''', '''conv_norm_out.weight'''),
    ('''out.0.bias''', '''conv_norm_out.bias'''),
    ('''out.2.weight''', '''conv_out.weight'''),
    ('''out.2.bias''', '''conv_out.bias'''),
]

a_ = [
    # (stable-diffusion, HF Diffusers)
    ('''in_layers.0''', '''norm1'''),
    ('''in_layers.2''', '''conv1'''),
    ('''out_layers.0''', '''norm2'''),
    ('''out_layers.3''', '''conv2'''),
    ('''emb_layers.1''', '''time_emb_proj'''),
    ('''skip_connection''', '''conv_shortcut'''),
]

a_ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        a_ = F"down_blocks.{i}.resnets.{j}."
        a_ = F"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            a_ = F"down_blocks.{i}.attentions.{j}."
            a_ = F"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        a_ = F"up_blocks.{i}.resnets.{j}."
        a_ = F"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            a_ = F"up_blocks.{i}.attentions.{j}."
            a_ = F"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        a_ = F"down_blocks.{i}.downsamplers.0.conv."
        a_ = F"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        a_ = F"up_blocks.{i}.upsamplers.0."
        a_ = F"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

a_ = '''mid_block.attentions.0.'''
a_ = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    a_ = F"mid_block.resnets.{j}."
    a_ = F"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _a ( UpperCamelCase_ : List[Any] ) -> Any:
    """Convert an HF Diffusers UNet state dict to original Stable Diffusion names.

    Builds an hf_name -> sd_name mapping (starting from identity) using the
    module-level conversion tables, then re-keys the tensors by the SD names.

    BUG FIX: the previous revision discarded every assignment target
    (`lowerCAmelCase__ = ...`), so the mapping was never updated and the
    replaced names were thrown away; the dict writes are restored.
    """
    unet_state_dict = UpperCamelCase_
    # identity map: hf_name -> (eventually) sd_name
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    # resnet-internal renames only apply to resnet keys
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    # block-level prefix renames apply everywhere
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# Name-mapping tables for the autoencoder, mirroring the UNet tables above.
# NOTE(review): as in the UNet section, the loop prefixes are all bound to the
# throwaway name `a_` while `vae_conversion_map.append(...)` reads names that
# are never defined — the original assignment targets were lost upstream.
a_ = [
    # (stable-diffusion, HF Diffusers)
    ('''nin_shortcut''', '''conv_shortcut'''),
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        a_ = F"encoder.down_blocks.{i}.resnets.{j}."
        a_ = F"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        a_ = F"down_blocks.{i}.downsamplers.0."
        a_ = F"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        a_ = F"up_blocks.{i}.upsamplers.0."
        a_ = F"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        a_ = F"decoder.up_blocks.{i}.resnets.{j}."
        a_ = F"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    a_ = F"mid_block.resnets.{i}."
    a_ = F"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

a_ = [
    # (stable-diffusion, HF Diffusers)
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]
def _a ( UpperCamelCase_ : Tuple ) -> int:
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
    """Convert an HF Diffusers VAE state dict to original SD names.

    Re-keys tensors through the module-level VAE conversion tables and
    reshapes the mid-block attention projection weights for the SD layout.

    BUG FIX: the previous revision discarded every assignment target, so the
    mapping was never updated; the dict writes are restored.
    """
    vae_state_dict = UpperCamelCase_
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    # attention-internal renames only apply to attention keys
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                # SD stores these as 1x1 convs: append two singleton dims
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Rename table between OpenCLIP-style keys (left) and HF CLIP keys (right).
# NOTE(review): the list, the `protected` dict, the compiled `textenc_pattern`
# and the q/k/v index table are all bound to the same throwaway name `a_`,
# while later code reads `textenc_conversion_lst`, `protected`,
# `textenc_pattern` and a q/k/v index map — the real names were lost upstream.
a_ = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
a_ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
a_ = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
a_ = {'''q''': 0, '''k''': 1, '''v''': 2}
def _a ( UpperCamelCase_ : int ) -> Union[str, Any]:
    """Convert an HF CLIP text-encoder state dict to the SD v2 (OpenCLIP) layout.

    HF stores the attention q/k/v projections as three separate tensors; the
    original checkpoint format expects them fused into single ``in_proj``
    tensors.  All other keys are renamed through ``textenc_pattern``.

    BUG FIXES over the previous revision:
      * the capture dicts and output dict were never written to because the
        assignment targets had been lost;
      * the ``re.sub`` replacement lambdas referenced an undefined name ``m``
        (the match object is now bound as the lambda parameter).
    """
    text_enc_dict = UpperCamelCase_
    # q/k/v ordering matches torch's fused in_proj layout
    code2idx = {"q": 0, "k": 1, "v": 2}
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight" )
            or k.endswith(".self_attn.k_proj.weight" )
            or k.endswith(".self_attn.v_proj.weight" )
        ):
            k_pre = k[: -len(".q_proj.weight" )]
            # 13 chars from the end is the single character 'q', 'k' or 'v'
            k_code = k[-len("q_proj.weight" )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias" )
            or k.endswith(".self_attn.k_proj.bias" )
            or k.endswith(".self_attn.v_proj.bias" )
        ):
            k_pre = k[: -len(".q_proj.bias" )]
            k_code = k[-len("q_proj.bias" )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )], k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )], k_pre )
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )], k_pre )
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors )
    return new_state_dict
def _a ( UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
    # CLI driver: loads a Diffusers-format model (safetensors preferred,
    # falling back to .bin), converts UNet / VAE / text encoder to the
    # original SD checkpoint layout, and writes a single checkpoint file.
    # NOTE(review): every result below is bound to the throwaway name `a_`
    # while later lines read `parser`, `args`, `unet_path`, `state_dict`,
    # the converter function names, etc. — the real assignment targets and
    # function names were lost upstream, so this block raises NameError.
    a_ = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )

    a_ = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    a_ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    a_ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    a_ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        a_ = load_file(unet_path, device='''cpu''')
    else:
        a_ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        a_ = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        a_ = load_file(vae_path, device='''cpu''')
    else:
        a_ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        a_ = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        a_ = load_file(text_enc_path, device='''cpu''')
    else:
        a_ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        a_ = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    a_ = convert_unet_state_dict(unet_state_dict)
    a_ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    a_ = convert_vae_state_dict(vae_state_dict)
    a_ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    a_ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        a_ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        a_ = convert_text_enc_state_dict_vaa(text_enc_dict)
        a_ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        a_ = convert_text_enc_state_dict(text_enc_dict)
        a_ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    a_ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        a_ = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        a_ = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 709 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-module bookkeeping: maps submodule name -> public names it provides.
# BUG FIX: the previous revision bound every structure to a throwaway name and
# finally referenced an undefined `_import_structure`; the conventional
# dict-building pattern used by transformers package __init__ files is
# restored, including the `sys.modules` replacement with the lazy proxy.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The slow tokenizer needs sentencepiece at runtime.
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 115 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Published RWKV-4 checkpoint sizes -> number of hidden layers / hidden size.
# NOTE(review): both tables are bound to the same name `__snake_case`, so the
# first is shadowed by the second; the converter below reads
# `NUM_HIDDEN_LAYERS_MAPPING` and `HIDEN_SIZE_MAPPING`, neither of which is
# defined here — the original table names were lost upstream.
__snake_case = {
    '''169M''': 1_2,
    '''430M''': 2_4,
    '''1B5''': 2_4,
    '''3B''': 3_2,
    '''7B''': 3_2,
    '''14B''': 4_0,
}
__snake_case = {
    '''169M''': 7_6_8,
    '''430M''': 1_0_2_4,
    '''1B5''': 2_0_4_8,
    '''3B''': 2_5_6_0,
    '''7B''': 4_0_9_6,
    '''14B''': 5_1_2_0,
}
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = list(state_dict.keys() )
for name in state_dict_keys:
__UpperCamelCase = state_dict.pop(_lowercase )
# emb -> embedding
if name.startswith('emb.' ):
__UpperCamelCase = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__UpperCamelCase = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__UpperCamelCase = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _lowercase )
# ffn -> feed_forward
__UpperCamelCase = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _lowercase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__UpperCamelCase = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__UpperCamelCase = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('.time_mix_r' ):
__UpperCamelCase = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__UpperCamelCase = 'rwkv.' + name
__UpperCamelCase = weight
return state_dict
def _A ( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ) -> Any:
    """Download an RWKV checkpoint from the Hub and convert it to HF format.

    Saves the tokenizer, config, and sharded converted weights to
    ``output_dir``; optionally pushes the result to the Hub.

    BUG FIXES over the previous revision: every parameter was named
    ``_lowercase`` (a SyntaxError) and all intermediate assignment targets
    were lost.  Parameter names are restored to match the keyword arguments
    used at the call site (``size=``, ``tokenizer_file=``, ``push_to_hub=``,
    ``model_name=``).
    NOTE(review): this still depends on module-level names
    (``NUM_HIDDEN_LAYERS_MAPPING``, ``convert_state_dict``) whose definitions
    were also mangled upstream — confirm those are restored as well.
    """
    # 1. Build (or download) the tokenizer and save it.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
        vocab_size = 5_02_77
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
    if size not in possible_sizes:
        raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='cpu' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , 'w' , encoding='utf-8' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        # re-save each shard with CPU-cloned tensors to break storage sharing
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='2GB' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    # CLI driver for the RWKV -> HF conversion above.
    # NOTE(review): both results are bound to `__snake_case` while the lines
    # below read `parser` / `args`, and the converter is called as
    # `convert_rmkv_checkpoint_to_hf_format`, a name not defined in this file
    # — the original assignment targets and function name were lost upstream.
    __snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
    )
    parser.add_argument(
        '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
    )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
    )
    parser.add_argument(
        '''--tokenizer_file''',
        default=None,
        type=str,
        help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
    )
    parser.add_argument(
        '''--size''',
        default=None,
        type=str,
        help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Push to the Hub the converted model.''',
    )
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''Name of the pushed model on the Hub, including the username / organization.''',
    )

    __snake_case = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
# Vocab file names, per-checkpoint URLs, max input sizes and init kwargs for
# the Funnel Transformer tokenizer family.
# NOTE(review): every constant is bound to the same name `UpperCamelCase_`,
# so each shadows the previous, and the dict comprehensions below read
# `_model_names`, which is never defined — the original constant names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) were lost upstream.
UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

UpperCamelCase_ = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

UpperCamelCase_ = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}
UpperCamelCase_ = {F'''funnel-transformer/{name}''': 5_12 for name in _model_names}
UpperCamelCase_ = {F'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class _SCREAMING_SNAKE_CASE ( snake_case ):
    """Fast (Rust-backed) WordPiece tokenizer for Funnel Transformer.

    BUG FIXES over the previous revision: all class attributes and all three
    methods shared a single obfuscated name (so earlier definitions were
    shadowed), and the method parameter lists repeated one name, which is a
    SyntaxError.  Names are restored to the standard
    ``PreTrainedTokenizerFast`` contract that the base class machinery reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses token type id 2 for the [CLS] token.
    cls_token_type_id = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # Re-sync the backend normalizer when the serialized options differ
        # from the constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one sequence (or a pair) with [CLS] / [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: ``cls_token_type_id`` for [CLS], 0 for sequence A, 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the WordPiece vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a_ ( a__ ):
    """Agent tool that summarizes an English text with a BART summarization model.

    BUG FIXES over the previous revision: all class attributes shared one name
    and all three methods shared one name (so earlier definitions were
    shadowed), and the ``truncation`` / ``skip_special_tokens`` /
    ``clean_up_tokenization_spaces`` keywords were bound to the method input
    instead of booleans.  Names restored to the PipelineTool contract.
    """

    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        # Tokenize, truncating to the model's maximum input length.
        return self.pre_processor(text , return_tensors='''pt''' , truncation=True )

    def forward(self, inputs):
        # Generate summary token ids; [0] selects the single output sequence.
        return self.model.generate(**inputs )[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
| 333 |
from __future__ import annotations
import pandas as pd
def UpperCAmelCase_( arrival_time , burst_time , no_of_processes ):
    """Preemptive Shortest-Remaining-Time-First scheduling.

    Simulates the timeline one tick at a time, always running the arrived
    process with the least remaining burst time, and returns the list of
    per-process waiting times.

    BUG FIXES over the previous revision: all three parameters shared one
    name (a SyntaxError) and the intermediate assignment targets
    (``remaining_time``, ``finish_time``, ``finar`` ...) had been lost, so the
    function raised NameError and never filled ``waiting_time``.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # nothing has arrived yet: idle for one tick
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def UpperCAmelCase_( burst_time , no_of_processes , waiting_time ):
    """Turnaround time of each process = its burst time + its waiting time.

    BUG FIXES over the previous revision: all parameters shared one name (a
    SyntaxError) and the per-element assignment target was lost, so the list
    was never filled and the return referenced an undefined name.  Parameter
    order matches the call site ``calculate_turnaroundtime(bt, n, wt)``.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def UpperCAmelCase_( waiting_time , turn_around_time , no_of_processes ):
    """Print the average waiting time and average turnaround time.

    BUG FIXES over the previous revision: all parameters shared one name (a
    SyntaxError) and the accumulator assignment targets were lost, so the
    totals were undefined names.
    """
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    # Interactive driver: read process arrival/burst times from stdin, run the
    # SRTF simulation, and print per-process stats as a pandas DataFrame.
    # NOTE(review): every result is bound to the throwaway name `a__` while
    # later lines read `no_of_processes`, `arrival_time`, `burst_time`, `bt`,
    # `n`, `wt`, `fcfs`, etc., and the tuple-unpacking line
    # `a__ , a__ : int = map(...)` is itself invalid syntax — the original
    # assignment targets were lost upstream.
    print('''Enter how many process you want to analyze''')
    a__ : Any = int(input())
    a__ : Dict = [0] * no_of_processes
    a__ : Dict = [0] * no_of_processes
    a__ : Optional[int] = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        a__ , a__ : int = map(int, input().split())
    a__ : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    a__ : Optional[int] = burst_time
    a__ : int = no_of_processes
    a__ : Dict = waiting_time
    a__ : List[str] = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    a__ : List[str] = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            '''Process''',
            '''BurstTime''',
            '''ArrivalTime''',
            '''WaitingTime''',
            '''TurnAroundTime''',
        ],
    )

    # Printing the dataFrame
    pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
    print(fcfs)
| 333 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCamelCase ( __a ):
    """Configuration for the M-CLIP multilingual text encoder.

    BUG FIXES over the previous revision: the ``__init__`` parameters were all
    named identically (a SyntaxError) and the attribute assignments had lost
    their targets.  The attribute names are restored to what the model class
    below reads (``config.transformerDimensions`` / ``config.numDims``).
    """

    # NOTE(review): restored attribute name; the obfuscated original only
    # showed `a__ :str = 'M-CLIP'` — confirm `model_type` is the intended key.
    model_type = '''M-CLIP'''

    def __init__(self , transformerDimSize=1_024 , imageDimSize=768 , **kwargs ) -> Any:
        # Width of the transformer output and of the CLIP projection space.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class UpperCamelCase ( __a ):
    """XLM-RoBERTa text encoder with a linear projection into CLIP space.

    BUG FIXES over the previous revision: the ``__init__`` parameters were all
    named identically (a SyntaxError), the attribute assignments had lost
    their targets, and the forward method carried an obfuscated name; the
    standard ``forward`` entry point is restored.
    """

    config_class = MCLIPConfig

    def __init__(self , config , *args , **kwargs ) -> Optional[int]:
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward(self , input_ids , attention_mask ) -> Optional[Any]:
        """Masked mean-pool the token embeddings, then project into CLIP space.

        Returns ``(projected_embeddings, token_embeddings)``.
        """
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # mean over non-padding positions: zero out padded tokens, then divide
        # by the number of real tokens in each sequence
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger; functions below (e.g. the save path message) reference `logger`.
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    """Build the DetaConfig (Swin-Large backbone) matching *model_name*.

    Chooses the Objects365 label set when "o365" is in the name, COCO otherwise.
    """
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """Return (old, new) pairs mapping original DETA checkpoint keys to HF names."""
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        # only the first 3 stages have a patch-merging (downsample) layer
        if i < 3:
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))

    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))

    # transformer encoder
    for i in range(config.encoder_layers):
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))

    # transformer decoder
    for i in range(config.decoder_layers):
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split Swin's fused qkv projections into separate q/k/v entries in *state_dict*."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split the decoder self-attention in_proj matrices into q/k/v entries."""
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Download the standard COCO cats image used for conversion sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETA structure, verify the outputs
    on a test image, then optionally save and/or push the converted model.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    # NOTE(review): mapping reconstructed from the scrambled original — confirm
    # against the upstream DETA conversion script before relying on it.
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# --- (stray dataset-join residue removed)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
# Module logger (conventional name used by transformers modules).
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    """Deprecated alias of :class:`DPTImageProcessor` kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit a deprecation warning, then behave exactly like DPTImageProcessor.
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LayoutLM (BERT-style WordPiece vocabulary)."""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab written to a temp dir for the tests below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # Intentionally a no-op placeholder in the original test suite.
        pass
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Search *item* in an ascending *sorted_collection* by interpolation.

    Returns the index of ``item`` or ``None`` when it is absent.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                # probe fell left of the window: move the window onto it
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of :func:`interpolation_search` over ``[left, right]``.

    Returns the index of ``item`` or ``None`` when it is absent.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # probe fell outside the window; recurse on the shifted window
        # NOTE(review): argument order reconstructed from the upstream algorithm — confirm.
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ``ValueError`` unless *collection* is sorted ascending; return True otherwise."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    # NOTE(review): the scrambled original bound `collection` only when debug == 1,
    # leaving it unbound on the default path; it is now bound unconditionally.
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Sentencepiece word-boundary marker; referenced by the tokenizer tests below.
SPIECE_UNDERLINE = "▁"

# Path to the sentencepiece fixture model used to build the test tokenizer.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
# NOTE(review): this test class is machine-scrambled — the class name, base
# `__SCREAMING_SNAKE_CASE` (presumably TokenizerTesterMixin — confirm), the
# `_UpperCAmelCase` class attributes and the `__magic_name__` placeholders are
# mangled identifiers; several names below are consequently undefined at runtime.
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
    # Scrambled class attributes; in the upstream suite these are
    # tokenizer_class / test_rust_tokenizer / test_sentencepiece (unverified).
    _UpperCAmelCase : str = BertGenerationTokenizer
    _UpperCAmelCase : Tuple = False
    _UpperCAmelCase : List[Any] = True

    # setUp: build a sentencepiece tokenizer from the fixture model and persist it.
    # NOTE(review): `__magic_name__` and `tokenizer` are unbound here — scrambling defect.
    def UpperCamelCase__ ( self ):
        super().setUp()
        lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
        tokenizer.save_pretrained(self.tmpdirname )

    # Round-trip a single token through id conversion ("<s>" <-> id 1).
    def UpperCamelCase__ ( self ):
        lowerCamelCase : List[str] = """<s>"""
        lowerCamelCase : Dict = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )

    # Sanity-check the vocab ordering and size (1002 entries).
    # NOTE(review): `vocab_keys` is unbound — the list is assigned to a scrambled name.
    def UpperCamelCase__ ( self ):
        lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )

    # vocab_size property check.
    def UpperCamelCase__ ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )

    # Full tokenizer test: tokenize, convert to ids and back for two sample texts.
    def UpperCamelCase__ ( self ):
        lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
        lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
        lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __magic_name__ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
        self.assertListEqual(
            __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
        lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
        self.assertListEqual(
            __magic_name__ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )

    # Full pretrained tokenizer, cached for the slow tests below.
    @cached_property
    def UpperCamelCase__ ( self ):
        return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )

    # Slow: encode a short string with the pretrained tokenizer.
    @slow
    def UpperCamelCase__ ( self ):
        lowerCamelCase : List[Any] = """Hello World!"""
        lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
        self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )

    # Slow: encode a long string with unusual characters and OOV words.
    @slow
    def UpperCamelCase__ ( self ):
        lowerCamelCase : str = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        lowerCamelCase : str = [
            8_7_1,
            4_1_9,
            3_5_8,
            9_4_6,
            9_9_1,
            2_5_2_1,
            4_5_2,
            3_5_8,
            1_3_5_7,
            3_8_7,
            7_7_5_1,
            3_5_3_6,
            1_1_2,
            9_8_5,
            4_5_6,
            1_2_6,
            8_6_5,
            9_3_8,
            5_4_0_0,
            5_7_3_4,
            4_5_8,
            1_3_6_8,
            4_6_7,
            7_8_6,
            2_4_6_2,
            5_2_4_6,
            1_1_5_9,
            6_3_3,
            8_6_5,
            4_5_1_9,
            4_5_7,
            5_8_2,
            8_5_2,
            2_5_5_7,
            4_2_7,
            9_1_6,
            5_0_8,
            4_0_5,
            3_4_3_2_4,
            4_9_7,
            3_9_1,
            4_0_8,
            1_1_3_4_2,
            1_2_4_4,
            3_8_5,
            1_0_0,
            9_3_8,
            9_8_5,
            4_5_6,
            5_7_4,
            3_6_2,
            1_2_5_9_7,
            3_2_0_0,
            3_1_2_9,
            1_1_7_2,
        ]
        self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )

    # Slow: feed tokenized input through a small BertGenerationEncoder forward pass.
    @require_torch
    @slow
    def UpperCamelCase__ ( self ):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
        lowerCamelCase : Dict = """ """.join(__magic_name__ )
        lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
        lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
        lowerCamelCase : Tuple = BertGenerationConfig()
        lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**__magic_name__ )
            model(**__magic_name__ )

    # Slow: full integration check against a recorded expected encoding.
    @slow
    def UpperCamelCase__ ( self ):
        # fmt: off
        lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
# --- (stray dataset-join residue removed)
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class _A ( _lowerCamelCase ):
    """Configuration for a Conditional-DETR-style object-detection model.

    Stores encoder/decoder sizes, backbone settings, and the Hungarian-matcher /
    loss coefficients. The original block had duplicated parameter names (a
    SyntaxError) and collapsed every ``self.X`` assignment onto one local; this
    restores the names the rest of the class actually reads.
    """

    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    # attribute_map lets callers read `hidden_size` / `num_attention_heads`
    # through the matching d_model / encoder_attention_heads fields.
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        """Build the config; `backbone_config` and `use_timm_backbone` are mutually exclusive."""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain-dict backbone config into its config class.
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by the shared config API (maps to encoder heads)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by the shared config API (maps to d_model)."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class _A ( _lowerCamelCase ):
    """ONNX export configuration for the detection model.

    The original block defined all three properties under the same name
    (``__a``), so only the last survived; the ONNX config contract requires
    them to be named `inputs`, `atol_for_validation` and `default_onnx_opset`.
    """

    # Minimum torch version able to export this graph.
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported inputs."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset used for export."""
        return 12
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase_ = get_logger(__name__)
def snake_case( fsdp_plugin , accelerator , model , output_dir , model_index=0 ) -> Optional[Any]:
    '''Save an FSDP-wrapped model's weights under `output_dir`.

    Behavior depends on `fsdp_plugin.state_dict_type`: FULL saves a single
    file from rank 0, LOCAL saves one file per rank, SHARDED writes a
    distributed-checkpoint directory. (Original block had every parameter
    named `__magic_name__` — a SyntaxError — and every local collapsed onto
    one name, losing the state dict before saving it.)
    '''
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
            output_model_file = os.path.join(output_dir , weights_name )
            # Full state dict exists only on rank 0; other ranks skip the write.
            if accelerator.process_index == 0:
                logger.info(F"""Saving model to {output_model_file}""" )
                torch.save(state_dict , output_model_file )
                logger.info(F"""Model saved to {output_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
                if model_index == 0
                else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(F"""Saving model to {output_model_file}""" )
            torch.save(state_dict , output_model_file )
            logger.info(F"""Model saved to {output_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , F"""{MODEL_NAME}_{model_index}""" )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F"""Saving model to {ckpt_dir}""" )
            state_dict = {'''model''': state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F"""Model saved to {ckpt_dir}""" )
def snake_case( fsdp_plugin , accelerator , model , input_dir , model_index=0 ) -> Optional[Any]:
    '''Load FSDP model weights saved by the matching save helper from `input_dir`.

    Mirrors the save layout per `fsdp_plugin.state_dict_type` (single file /
    per-rank file / sharded checkpoint directory), then applies the state dict
    to `model`. (Original block had duplicated `__magic_name__` parameters and
    collapsed locals, so the loaded state dict never reached the model.)
    '''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Only rank 0 holds/loads the full dict unless module states are synced.
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
                        '''initializing FSDP object''' )
                return
            weights_name = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F"""Loading model from {input_model_file}""" )
            state_dict = torch.load(input_model_file )
            logger.info(F"""Model loaded from {input_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
                if model_index == 0
                else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F"""Loading model from {input_model_file}""" )
            state_dict = torch.load(input_model_file )
            logger.info(F"""Model loaded from {input_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , F"""{MODEL_NAME}_{model_index}""" )
                if F"""{MODEL_NAME}""" not in input_dir
                else input_dir
            )
            logger.info(F"""Loading model from {ckpt_dir}""" )
            state_dict = {'''model''': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict['''model''']
            logger.info(F"""Model loaded from {ckpt_dir}""" )
    model.load_state_dict(state_dict )
def snake_case( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ) -> int:
    '''Save the FSDP-sharded optimizer state for `optimizer`/`model` to `output_dir`.

    FULL_STATE_DICT saves a single rank-0 file; any other state-dict type
    writes a distributed-checkpoint directory. (Original block had duplicated
    `__magic_name__` parameters — a SyntaxError — and collapsed locals.)
    '''
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
                torch.save(optim_state , output_optimizer_file )
                logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
        else:
            ckpt_dir = os.path.join(output_dir , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
            dist_cp.save_state_dict(
                state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def snake_case( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ) -> str:
    '''Load FSDP optimizer state from `input_dir` and apply it to `optimizer`.

    Mirrors the save helper's layout, then flattens the loaded state with
    `FSDP.optim_state_dict_to_load` before `optimizer.load_state_dict`.
    (Original block had duplicated `__magic_name__` parameters and collapsed
    locals, so the loaded state was discarded.)
    '''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
            optim_state = torch.load(input_optimizer_file )
            logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
        else:
            ckpt_dir = (
                os.path.join(input_dir , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
                if F"""{OPTIMIZER_NAME}""" not in input_dir
                else input_dir
            )
            logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state['''optimizer''']
            logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
    flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
    optimizer.load_state_dict(flattened_osd )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class __magic_name__ ( UpperCAmelCase__ ):
    """Configuration for a RoBERTa-PreLayerNorm-style model.

    The original block had every `__init__` parameter named `_a` (a
    SyntaxError) and collapsed every attribute assignment onto one local;
    this restores the names the body actually reads.
    """

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the transformer hyper-parameters and forward special tokens to the base."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __magic_name__ ( UpperCAmelCase__ ):
    """ONNX export configuration; the dynamic-axes property must be named
    `inputs` (the original's obfuscated `_lowerCAmelCase` broke the contract)."""

    @property
    def inputs(self):
        """Dynamic-axis spec; multiple-choice tasks add a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 543 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a__ ( protein ) -> Dict[str, torch.Tensor]:
    """Add atom14<->atom37 index and existence-mask tensors to `protein` (in place).

    The original block collapsed three distinct lookup lists onto one name and
    lost the dict writes; `torch.intaa`/`torch.floataa` are not torch attributes
    (restored to `int32`/`float32`).

    # assumes protein["aatype"] is an integer residue-type tensor — TODO confirm
    """
    restype_atomaa_to_atomaa_list = []  # atom14 -> atom37 indices per restype
    restype_atomaa_to_atomaa_rev_list = []  # atom37 -> atom14 indices per restype
    restype_atomaa_mask_list = []  # which of the 14 slots exist per restype
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names )}
        restype_atomaa_to_atomaa_rev_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14 )
    restype_atomaa_to_atomaa_rev_list.append([0] * 37 )
    restype_atomaa_mask_list.append([0.0] * 14 )

    device = protein["""aatype"""].device
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa_list , dtype=torch.int32 , device=device )
    restype_atomaa_to_atomaa_rev = torch.tensor(restype_atomaa_to_atomaa_rev_list , dtype=torch.int32 , device=device )
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask_list , dtype=torch.float32 , device=device )
    protein_aatype = protein["""aatype"""].to(torch.long )

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atomaa_mask
    protein["""residx_atom14_to_atom37"""] = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    residx_atomaa_to_atomaa_rev = restype_atomaa_to_atomaa_rev[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atomaa_to_atomaa_rev.long()

    # create the corresponding atom37 existence mask
    restype_atomaa_rev_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atomaa_rev_mask[restype, atom_type] = 1

    residx_atomaa_rev_mask = restype_atomaa_rev_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atomaa_rev_mask
    return protein
def a__ ( batch ) -> Dict[str, np.ndarray]:
    """NumPy wrapper: tensorize `batch`, build the atom14/atom37 masks, convert back.

    The original body referenced an undefined name `batch` instead of its own
    parameter; the parameter is renamed to match what the body reads.
    """
    batch = tree_map(lambda n: torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    # NOTE(review): `make_atomaa_masks` is presumably the tensor-based builder
    # defined above in this module — confirm the name resolves at runtime.
    out = tensor_tree_map(lambda t: np.array(t ) , make_atomaa_masks(batch ) )
    return out
| 543 | 1 |
from __future__ import annotations
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ ) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix.

    Returns (lower, upper) with unit diagonal on `lower` such that
    lower @ upper == table. Raises ValueError for non-square input and
    ArithmeticError when a zero pivot makes the decomposition impossible.
    (The original used the matrix itself as every loop bound — a TypeError.)
    """
    table = SCREAMING_SNAKE_CASE_
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            '''\'table\' has to be of square shaped array but got a '''
            F"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        # Fill lower[i][j] for j < i using the already-computed pivots.
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('''No LU decomposition exists''' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Fill the remainder of row i of the upper factor.
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 262 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import structure for the swin model package. The original block
# repeatedly rebound one name instead of keying into the dict (so the
# torch/TF entries were lost) and never installed the _LazyModule.
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_swin'''] = [
        '''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''SwinForImageClassification''',
        '''SwinForMaskedImageModeling''',
        '''SwinModel''',
        '''SwinPreTrainedModel''',
        '''SwinBackbone''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_swin'''] = [
        '''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSwinForImageClassification''',
        '''TFSwinForMaskedImageModeling''',
        '''TFSwinModel''',
        '''TFSwinPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 262 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowerCamelCase, unittest.TestCase ):
    """Fast ShapEPipeline tests built on tiny randomly-initialized components.

    The original block named every method `UpperCamelCase` (only the last
    survived), duplicated `lowerCamelCase__` parameters (a SyntaxError) and
    collapsed all locals onto `A_`. Names are restored from the `self.*`
    references the bodies themselves make and unittest's `test_` convention.
    """

    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 3_2

    @property
    def time_input_dim(self):
        return 3_2

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 1_6,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 3_2,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 9_3),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 1_2,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model

    def get_dummy_components(self):
        """Assemble the tiny pipeline components used by all fast tests."""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; mps needs a CPU-seeded generator."""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 3_2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_shap_e(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (2_0, 3_2, 3_2, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained Shap-E pipeline.

    The original block's two methods shared one obfuscated name, so the
    tearDown was shadowed; restored to the unittest names the decorators
    and bodies imply.
    """

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            '''a shark''' , generator=generator , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images , expected_image )
| 203 |
from __future__ import annotations
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if start is None:
A_ = 0
if end is None:
A_ = len(SCREAMING_SNAKE_CASE ) - 1
if start >= end:
return
A_ = (start + end) // 2
slowsort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
slowsort(SCREAMING_SNAKE_CASE , mid + 1 , SCREAMING_SNAKE_CASE )
if sequence[end] < sequence[mid]:
A_ ,A_ = sequence[mid], sequence[end]
slowsort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 203 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
snake_case__ : int = {
"""input_ids""": tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
snake_case__ : Optional[int] = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
snake_case__ : Tuple = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 714 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE: the append loop below uses the name `rename_keys`, so the list must be bound to it.
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def UpperCamelCase__(state_dict, old, new):
    """Rename a key in *state_dict* in place: move the value under *old* to *new*.

    Fixes the scrambled version, which declared three parameters all named
    ``__magic_name__`` (a SyntaxError) and assigned ``val`` to a dead local
    instead of writing it back under the new key.
    """
    # pop() removes the old entry and yields its value in a single step
    val = state_dict.pop(old)
    state_dict[new] = val


rename_key = UpperCamelCase__  # name the conversion routine below calls
def UpperCamelCase__(state_dict):
    """Return a copy of *state_dict* with timm backbone keys renamed to the HF layout.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; all other keys are kept unchanged.  Insertion
    order is preserved (OrderedDict).  Fixes the scrambled version, which wrote
    every entry to a throwaway local so the new dict stayed empty.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
        new_state_dict[key] = value
    return new_state_dict


rename_backbone_keys = UpperCamelCase__  # name the conversion routine below calls
def UpperCamelCase__(state_dict, is_panoptic=False):
    """Split each encoder layer's fused attention projection into q/k/v entries.

    The original checkpoint stores a single ``in_proj_weight``/``in_proj_bias``
    per self-attention layer (PyTorch MultiHeadAttention layout); the HF model
    expects separate q_proj / k_proj / v_proj tensors.  The hidden size is 256,
    so the fused tensors are sliced in thirds of 256 rows.  Mutates
    *state_dict* in place.  Target key names follow the upstream HF conversion
    script for Conditional DETR.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder (6 layers)
    for i in range(6):
        # read in weights + bias of the fused input projection layer
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


read_in_q_k_v = UpperCamelCase__  # name the conversion routine below calls
def UpperCamelCase__():
    """Download and return the standard COCO validation image used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # bug fix: the scrambled version passed the URL string itself as the
    # `stream` flag; `stream=True` is what keeps `.raw` readable.
    im = Image.open(requests.get(url, stream=True).raw)
    return im


prepare_img = UpperCamelCase__  # name the conversion routine below calls
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Any:
    """Convert an original Conditional DETR checkpoint into the HF format and save it.

    NOTE(review): the local names in this function were machine-scrambled —
    every assignment binds `snake_case__` while later lines read the intended
    names (`config`, `model_name`, `is_panoptic`, `idalabel`, `image_processor`,
    `pixel_values`, `conditional_detr`, `state_dict`, `val`, `model`,
    `pytorch_dump_folder_path`, ...), and both parameters are named
    `__magic_name__` (a duplicate-argument SyntaxError).  The code is kept
    byte-identical below; the real bindings must be restored before this
    script can run.
    """
    # intended: config = ConditionalDetrConfig()
    snake_case__ : Union[str, Any] = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        snake_case__ : str = """resnet101"""
    if "dc5" in model_name:
        snake_case__ : Optional[int] = True
    snake_case__ : str = """panoptic""" in model_name
    if is_panoptic:
        # panoptic checkpoints use 250 labels; detection ones the 91 COCO classes
        snake_case__ : List[Any] = 2_50
    else:
        snake_case__ : Optional[Any] = 91
        snake_case__ : Optional[Any] = """huggingface/label-files"""
        snake_case__ : Optional[int] = """coco-detection-id2label.json"""
        # the id2label mapping is fetched from the hub "dataset" repo above
        snake_case__ : int = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
        snake_case__ : Any = {int(__magic_name__ ): v for k, v in idalabel.items()}
        snake_case__ : Optional[Any] = idalabel
        snake_case__ : int = {v: k for k, v in idalabel.items()}
    # load image processor
    snake_case__ : Optional[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
    snake_case__ : Union[str, Any] = ConditionalDetrImageProcessor(format=__magic_name__ )
    # prepare image
    snake_case__ : str = prepare_img()
    snake_case__ : Tuple = image_processor(images=__magic_name__ , return_tensors="""pt""" )
    snake_case__ : Any = encoding["""pixel_values"""]
    logger.info(f"Converting model {model_name}..." )
    # load original model from torch hub
    snake_case__ : Optional[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , __magic_name__ , pretrained=__magic_name__ ).eval()
    snake_case__ : List[Any] = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            snake_case__ : Dict = """conditional_detr.""" + src
        rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
    snake_case__ : Union[str, Any] = rename_backbone_keys(__magic_name__ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__magic_name__ , is_panoptic=__magic_name__ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    snake_case__ : List[Any] = """conditional_detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""conditional_detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                snake_case__ : Dict = state_dict.pop(__magic_name__ )
                snake_case__ : str = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                snake_case__ : Optional[int] = state_dict.pop(__magic_name__ )
                snake_case__ : Optional[int] = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                continue
            else:
                snake_case__ : str = state_dict.pop(__magic_name__ )
                snake_case__ : Dict = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                snake_case__ : int = state_dict.pop(__magic_name__ )
                snake_case__ : str = val
    # finally, create HuggingFace model and load state dict
    snake_case__ : Tuple = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    model.push_to_hub(repo_id=__magic_name__ , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    snake_case__ : Union[str, Any] = conditional_detr(__magic_name__ )
    snake_case__ : Dict = model(__magic_name__ )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
    model.save_pretrained(__magic_name__ )
    image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    # Fix: the scrambled version bound both the parser and the parsed args to
    # `A_` while the following lines read `parser` and `args` (NameErrors).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    A_ = args  # preserve the module-level name the scrambled file exported
    # NOTE(review): the conversion entry point above is defined under a
    # scrambled name; this call uses the script's intended public name.
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 419 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for DeBERTa (slow and fast implementations).

    Fixes the scrambled version: the mixin base class was the undefined name
    ``__snake_case``, the three mixin-read class attributes were all named
    ``a`` (shadowing each other), the methods were all named ``_A`` (so only
    the last survived and the unittest/mixin hooks never ran), and locals
    bound to ``_a`` were read under their intended names (NameErrors).
    """

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        # Always apply the special tokens defined in setUp.
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        # 7 tokens for the first segment (incl. specials), 6 for the second
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 487 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# The scrambled file bound all four of these module constants to a single name
# (`lowerCamelCase`), while the tokenizer class below reads them as `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}

lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES  # preserve the last scrambled binding
def __snake_case(word):
    """Return the set of adjacent symbol pairs in *word* (a sequence of symbols).

    Fixes the scrambled version, which bound every local to ``_a`` so the
    ``pairs.add`` call raised a NameError.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return set(pairs)


get_pairs = __snake_case  # name the tokenizer class below calls
class UpperCAmelCase(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (the 90M checkpoint).

    Fixes the scrambled version: the base class was the undefined name
    ``__snake_case`` (should be the imported ``PreTrainedTokenizer``), the
    four class attributes were all named ``a`` (shadowing each other and never
    seen by the tokenizer machinery), the methods were all named ``_A`` (so
    only the last survived), ``__init__`` declared every parameter as
    ``__UpperCamelCase`` (a duplicate-argument SyntaxError), and locals bound
    to ``_a`` meant ``self.encoder``/``self.decoder``/``self.bpe_ranks``/
    ``self.cache`` were never assigned.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is the "#version" header, last is the trailing blank
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to *token*, returning "@@"-joined sub-words."""
        if token in self.cache:
            return self.cache[token]
        # pad punctuation with spaces and normalize whitespace / newlines
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # mark the final character with the end-of-word suffix
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # merge the lowest-ranked (earliest-learned) bigram first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # strip the trailing "</w>" marker
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 487 | 1 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num):
    """Return True if the 0-9 pandigital tuple *num* has the Project Euler 43
    substring-divisibility property (d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, ...).

    Fixes the scrambled version, in which both functions here shared one name
    and iterated over an undefined identifier.
    """
    # d4 even  <=>  d2d3d4 divisible by 2
    if num[3] % 2 != 0:
        return False
    # digit-sum rule for divisibility by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # last-digit rule for divisibility by 5
    if num[5] % 5 != 0:
        return False
    # remaining three-digit substrings against 7, 11, 13, 17
    for i, test in enumerate([7, 11, 13, 17]):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n=10):
    """Sum all pandigital numbers over digits 0..n-1 with the substring property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


# Preserve the scrambled file's public binding (its second def shadowed the first).
__SCREAMING_SNAKE_CASE = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 714 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Fix: the scrambled line annotated this as `List[Any]` without importing
# `List` — module-level annotations are evaluated, so it raised NameError —
# and the config class below logs through the name `logger`.
UpperCAmelCase = logging.get_logger(__name__)
logger = UpperCAmelCase
class UpperCamelCase(PretrainedConfig):
    """Configuration class for an UperNet semantic-segmentation model.

    Fixes the scrambled version: the base class was the undefined name
    ``snake_case`` (should be the imported ``PretrainedConfig``), the
    ``model_type`` attribute and the ``to_dict`` override were renamed so the
    config machinery could not find them, ``__init__`` never called
    ``super().__init__(**kwargs)``, and every attribute landed on throwaway
    ``_lowercase`` locals instead of ``self``.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # kept mutable for backward compat; treated as read-only
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # a plain dict is rehydrated via its declared model_type
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 600 | 0 |
"""simple docstring"""
from manim import *
class __lowerCAmelCase ( UpperCAmelCase ):
    """Manim scene: animates loading an empty model skeleton into CPU memory.

    NOTE(review): the locals here were machine-scrambled — every assignment
    binds `UpperCamelCase_` while later lines read the intended names (`mem`,
    `cpu`, `gpu`, `model`, `key`, `key_text`, `step_a`, `cpu_targs`, ...), and
    the base class and the `.arrange(...)` direction constants (presumably
    UP/DOWN/LEFT/RIGHT from `manim` — confirm against the upstream scene) were
    replaced by scrambled names as well.  Code kept byte-identical; restore
    the real bindings before rendering.
    """
    def UpperCamelCase__ ( self: int ):
        # CPU block: two 6-cell columns of small rectangles plus a "CPU" label
        UpperCamelCase_ =Rectangle(height=0.5 , width=0.5 )
        UpperCamelCase_ =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCamelCase_ =[mem.copy() for i in range(6 )]
        UpperCamelCase_ =[mem.copy() for i in range(6 )]
        UpperCamelCase_ =VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
        UpperCamelCase_ =VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
        UpperCamelCase_ =VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
        UpperCamelCase_ =Text("CPU" , font_size=24 )
        UpperCamelCase_ =Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCamelCase_ )
        # GPU block: a single cell plus label, shifted left of center
        UpperCamelCase_ =[mem.copy() for i in range(1 )]
        UpperCamelCase_ =VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
        UpperCamelCase_ =Text("GPU" , font_size=24 )
        UpperCamelCase_ =Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
        gpu.align_to(UpperCamelCase_ , UpperCamelCase_ )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(UpperCamelCase_ )
        # Model block: six cells plus label on the right-hand side
        UpperCamelCase_ =[mem.copy() for i in range(6 )]
        UpperCamelCase_ =VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
        UpperCamelCase_ =Text("Model" , font_size=24 )
        UpperCamelCase_ =Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) , )
        UpperCamelCase_ =MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
        UpperCamelCase_ =Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCamelCase_ =MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCamelCase_ , run_time=2.5 ) , Write(UpperCamelCase_ ) , Write(UpperCamelCase_ ) )
        self.add(UpperCamelCase_ )
        UpperCamelCase_ =[]
        UpperCamelCase_ =[]
        UpperCamelCase_ =[]
        # For each model cell, fade it while a matching target slides into the CPU columns.
        for i, rect in enumerate(UpperCamelCase_ ):
            UpperCamelCase_ =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase_ , opacity=0.7 )
            cpu_target.move_to(UpperCamelCase_ )
            cpu_target.generate_target()
            UpperCamelCase_ =0.46 / 4
            UpperCamelCase_ =0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase_ )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase_ , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase_ , buff=0.0 )
            cpu_targs.append(UpperCamelCase_ )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase_ ) )
            second_animations.append(MoveToTarget(UpperCamelCase_ , run_time=1.5 ) )
        self.play(*UpperCamelCase_ )
        self.play(*UpperCamelCase_ )
        self.wait()
| 391 |
"""simple docstring"""
import datasets
# The scrambled file bound all three documentation strings to the single name
# ``A_``; the metric class below needs them under their canonical names
# (the decorator reads _DESCRIPTION/_KWARGS_DESCRIPTION, _info reads _CITATION).
_CITATION = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    'accuracy': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric(\"xnli\")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n"
A_ = _KWARGS_DESCRIPTION  # preserve the last scrambled binding
def _UpperCamelCase(preds, labels):
    """Fraction of *preds* equal to *labels* (expects numpy arrays of equal shape).

    Fixes the scrambled version, which declared both parameters as ``A``
    (a duplicate-argument SyntaxError).
    """
    return (preds == labels).mean()


simple_accuracy = _UpperCamelCase  # name the metric class below calls
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """XNLI metric: simple accuracy of predicted vs. reference labels.

    Fixes the scrambled version, whose methods were renamed away from the
    ``_info``/``_compute`` hook names that ``datasets.Metric`` dispatches to.
    """

    def _info(self):
        # Called by `datasets` to describe the metric's inputs/outputs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        # Called by `datasets` from Metric.compute().
        return {"accuracy": simple_accuracy(predictions, references)}
| 391 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase(ABC):
    """Abstract base class for `transformers-cli` subcommands.

    Fixes the scrambled version: the base class was the undefined name
    ``_lowercase`` (should be the imported ``ABC``), and both abstract methods
    were named ``__magic_name__`` so the second shadowed the first.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's sub-parser and arguments to *parser*."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 470 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _lowerCAmelCase(TestCase):
    """Tests for FeaturesManager.determine_framework's resolution rules.

    Fixes the scrambled version: the base class was the undefined name
    ``_lowercase`` (should be the imported ``TestCase``), all methods were
    named ``__magic_name__`` (shadowing each other and hiding the ``test_*``
    names unittest discovers), and the boolean/mock values were replaced by
    the undefined name ``__UpperCAmelCase``.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        # Save a tiny PyTorch checkpoint into *model_path*.
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        # Save a tiny TensorFlow checkpoint into *model_path*.
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 470 | 1 |
'''simple docstring'''
from math import factorial
# Pre-computed digit -> factorial lookup, keyed by the digit's string form so
# it can be indexed directly while iterating str(number).
lowerCAmelCase = {str(digit): factorial(digit) for digit in range(10)}
DIGIT_FACTORIAL = lowerCAmelCase  # name read by digit_factorial_sum below


def __A(number):
    """Return the sum of the factorials of *number*'s digits.

    Raises TypeError for non-int input and ValueError for negative input.
    Fixes the scrambled ``isinstance(a_, a_)`` check and the undefined
    ``DIGIT_FACTORIAL`` reference.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Convert the number to a string to iterate over its digits.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


digit_factorial_sum = __A  # name read by the chain search below


def __A(chain_length=60, number_limit=1000000):
    """Project Euler 74: count starting values below *number_limit* whose
    digit-factorial chain contains exactly *chain_length* non-repeating terms.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # counter for chains of exactly the desired length
    chains_counter = 0
    # memoized chain lengths of previously visited starting values
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # elements of the current chain, used to detect repeats
        chain_set = set()
        chain_set_length = 0

        # Walk the chain until we hit a cached value, a repeat, or exceed the
        # desired length.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length

        # Exact-length chains increase the counter.
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


solution = __A  # the original's second def shadowed the first under this name

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 525 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the demo image used to sanity-check the converted model and
    return it as an RGB PIL image."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the list of ``(original_key, hf_key)`` pairs that map LAVIS
    checkpoint parameter names onto the HF InstructBLIP layout.

    Args:
        config: an ``InstructBlipConfig``; only
            ``config.vision_config.num_hidden_layers`` is read.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse the separate q/v biases of each vision-encoder block into a single
    ``qkv.bias`` entry of ``state_dict`` (modified in place).

    The key projection has no bias in the original model, so zeros are inserted
    in the middle third of the fused bias.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    """Assemble the ``InstructBlipConfig`` and input image size for a checkpoint name.

    Returns:
        ``(config, image_size)`` — image size is 364 for COCO-finetuned
        checkpoints, 224 otherwise.

    Raises:
        ValueError: if ``model_name`` does not match a supported variant.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS InstructBLIP checkpoint to the HF transformers layout,
    verify logits and generation parity against the original model, then
    optionally save and/or push the result.

    Args:
        model_name: one of the keys of ``model_name_to_original`` below.
        pytorch_dump_folder_path: if given, save processor + model there.
        push_to_hub: if True, push both to ``Salesforce/{model_name}``.
    """
    # Q-Former uses a BERT tokenizer extended with one special "[DEC]" token.
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    lavis_name, lavis_model_type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    # Keep the two models on separate devices so both fit in GPU memory.
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_name, model_type=lavis_model_type, is_eval=True, device=lavis_device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = "What is unusual about this image?"
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer)
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")
    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 531 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint to HF ``LukeForMaskedLM``, verify
    known outputs, and save model + tokenizer to ``pytorch_dump_folder_path``.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` with the model config.
        entity_vocab_path: path to the original entity vocabulary (jsonl).
        pytorch_dump_folder_path: output folder.
        model_size: "base" or "large"; verification values exist for "base" only.
    """
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens from the "@" / "#" tokens.
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    # by copying the word-to-word query weights/biases.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    # These decoder weights are re-created by tie_weights(); drop them before loading.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        # Backbone weights live under the "luke." prefix in the HF model.
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Load the original jsonl entity vocabulary into a flat name -> id mapping.

    Special tokens keep their bare name; regular entities are keyed as
    ``"{language}:{entity_name}"``. Each line of the file is a JSON object with
    an ``"id"`` and a list of ``[entity_name, language]`` pairs under
    ``"entities"``.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    # Use a context manager so the file handle is closed deterministically.
    with open(entity_vocab_path) as f:
        data = [json.loads(line) for line in f]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 713 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so pipeline image outputs are reproducible across runs.
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
    """Fast CPU tests for ``LDMTextToImagePipeline`` built from tiny random components.

    NOTE(review): identifiers in this module appear machine-mangled — this
    class name is reused by two later classes in the file, every class
    attribute below is assigned to the same name ``_lowerCamelCase`` (each
    assignment shadows the previous one), and method locals are assigned to
    ``lowerCAmelCase`` while later lines read names such as ``unet``,
    ``generator`` or ``inputs`` that are never bound. ``snake_case__`` is also
    referenced in methods that do not declare it. The intended names are
    presumably those used by ``PipelineTesterMixin`` (``pipeline_class``,
    ``params``, ``required_optional_params``, ``batch_params``) — confirm
    against the mixin and restore them before relying on this suite.
    """
    # Presumably the pipeline class under test.
    _lowerCamelCase : List[Any] = LDMTextToImagePipeline
    # Text-to-image call params minus arguments this pipeline does not accept.
    _lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
        """negative_prompt""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
        """prompt_embeds""",
    }
    _lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    _lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    _lowerCamelCase : Optional[int] = False
    def lowercase ( self ):
        """Build a dict of miniature pipeline components (UNet, DDIM scheduler, VAE, CLIP text encoder + tokenizer) with fixed seeds."""
        torch.manual_seed(0 )
        lowerCAmelCase : Dict = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        lowerCAmelCase : int = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
        torch.manual_seed(0 )
        lowerCAmelCase : str = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
        torch.manual_seed(0 )
        lowerCAmelCase : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        lowerCAmelCase : str = CLIPTextModel(snake_case__ )
        lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # Keys follow the LDM pipeline's component names ('vqvae', 'bert').
        lowerCAmelCase : List[Any] = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def lowercase ( self , snake_case__ , snake_case__=0 ):
        """Return deterministic call kwargs (prompt, generator, 2 steps) for the given device and seed."""
        # MPS does not support device-bound generators, so fall back to the global seed there.
        if str(snake_case__ ).startswith('mps' ):
            lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
        else:
            lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
        lowerCAmelCase : Tuple = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def lowercase ( self ):
        """Run the tiny pipeline for 2 steps on CPU and compare a 3x3 corner slice of the image with reference values."""
        lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase : Optional[Any] = self.get_dummy_components()
        lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ )
        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
        lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
        lowerCAmelCase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        lowerCAmelCase : List[Any] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained ``CompVis/ldm-text2im-large-256`` checkpoint (3 inference steps).

    NOTE(review): this class re-uses the name ``lowerCAmelCase`` of the class
    above (and is re-used again below), so at import time only the last
    definition survives — unittest discovery will not collect this one.
    Method locals are likewise assigned to ``lowerCAmelCase`` while later
    lines read the original names (``latents``, ``pipe``, ``image_slice``…),
    and ``snake_case__`` is read in methods that do not declare it; restore
    the original identifiers before use.
    """
    def lowercase ( self ):
        """Free GPU memory after a test. NOTE(review): named like a helper but calls ``super().tearDown()`` — presumably meant to override ``tearDown``; confirm."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
        """Build deterministic latents and call kwargs (3 steps) for the given device, dtype and seed."""
        lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
        # Latents come from numpy's RNG so they are identical across torch versions/devices.
        lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
        lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
        lowerCAmelCase : List[str] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def lowercase ( self ):
        """Run the pretrained 256x256 LDM pipeline and compare a 3x3 corner slice against known reference values."""
        lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
        lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
        lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase : Tuple = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
        lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Nightly GPU test: run the full 50-step ``CompVis/ldm-text2im-large-256`` pipeline and compare
    against a reference image stored as a numpy array on the Hub.

    NOTE(review): same mangling as the classes above — this definition shadows
    them (third re-use of the name ``lowerCAmelCase``), locals are collapsed
    to ``lowerCAmelCase`` while later lines read the original names, and
    ``snake_case__`` is read in methods that do not declare it; restore the
    original identifiers before use.
    """
    def lowercase ( self ):
        """Free GPU memory after a test. NOTE(review): presumably meant to override ``tearDown`` — confirm."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
        """Build deterministic latents and call kwargs (50 steps) for the given device, dtype and seed."""
        lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
        lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
        lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
        lowerCAmelCase : List[str] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def lowercase ( self ):
        """Generate one image with the full 50-step schedule and compare it pixel-wise to the stored reference array."""
        lowerCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        lowerCAmelCase : int = self.get_inputs(snake_case__ )
        lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images[0]
        lowerCAmelCase : Optional[int] = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
        lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
| 646 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Splits code on any character that cannot appear in an identifier.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash signature of a token list.

    Returns None for snippets with fewer than ``MIN_NUM_TOKENS`` tokens, which
    are too short to deduplicate meaningfully.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    # Hash the *set* of tokens: ordering and repetition do not matter for MinHash.
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Split a code string on non-identifier characters and return the set of non-empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """LSH index that groups near-duplicate code files into clusters.

    Files are added one by one with their MinHash; each new file either joins
    the cluster of an already-indexed near-duplicate or starts a new cluster.
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # Maps a cluster's "base" file key to the set of its near-duplicates.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add ``code_key`` (an ``(index, repo_name, path)`` tuple) to the index.

        If the key collides with near-duplicates already in the index, attach
        it to the existing cluster of the first matching base; otherwise the
        first close duplicate becomes the cluster base.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster among the hits: start one at the first hit.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return all clusters, each as a list of {'base_index', 'repo_name', 'path'} dicts (base first)."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Worker helper: turn an ``(index, row)`` pair into ``((index, repo_name, path), min_hash)``.

    Returns None implicitly when the snippet is too short to hash.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield ``(file_key, min_hash)`` pairs for a dataset, hashing rows in a process pool.

    Rows too short to hash (helper returned None) are skipped.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find near-duplicate clusters in a dataset at the given Jaccard threshold.

    Streams MinHashes from `minhash_iter` into a DuplicationIndex and returns
    its clusters.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Return the Jaccard similarity between the token sets of two code strings.

    Fixes the original, which compared a token set against itself and thus
    always returned 1.0.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Module-level handle on the dataset, set by `find_extremes` via a `global`
# statement so worker processes spawned afterwards inherit it instead of
# pickling the dataset per task.
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its 'extremes' — a minimal set of
    representatives such that every other member is within ``jaccard_threshold``
    of some extreme. Each extreme's ``"copies"`` counter tracks how many
    members it absorbed. Reads the dataset via the module-global
    ``_shared_dataset`` (set by ``find_extremes``).
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # Not close to any current extreme: element1 becomes a new extreme.
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every duplicate cluster in parallel.

    Publishes ``dataset`` through the module-global ``_shared_dataset`` before
    forking so workers inherit it instead of receiving a pickled copy.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    worker = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                worker,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset( dataset , jaccard_threshold = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate a dataset, keeping one "extreme" representative per duplicate group.

    Args:
        dataset: the dataset to filter (must support ``.filter(..., with_indices=True)``).
        jaccard_threshold: similarity threshold for both clustering and extreme merging.

    Returns:
        (filtered_dataset, duplicate_clusters) — each cluster element is annotated
        with ``is_extreme`` and, for extremes, its ``copies`` count.
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not a kept extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
| 163 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # "I'm feeling lucky"-style script: google the CLI args (or a prompt) and
    # open the first result in the default browser.
    # NOTE: the original assigned every value to `_SCREAMING_SNAKE_CASE` while
    # later lines referenced `query`/`url`/`res`/`link` — restored here.
    query = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        # Desktop result layout: anchor inside a div.yuRUbf container.
        link = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        # Fallback layout (div.kCrYT): href is a redirect whose real target is
        # in the `url` query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )["""url"""][0]
    webbrowser.open(link)
| 163 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()  # surface info-level progress messages during conversion
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , albert_config_file : str , pytorch_dump_path : str ):
    '''Convert a TensorFlow ALBERT checkpoint into a PyTorch state-dict file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        albert_config_file: JSON config describing the pre-trained ALBERT architecture.
        pytorch_dump_path: where to write the converted PyTorch weights.
    '''
    # NOTE: this function is invoked as `convert_tf_checkpoint_to_pytorch` at the
    # bottom of the module; the original def used a mangled name and locals.
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # NOTE: the original bound both the parser and the parsed args to
    # `SCREAMING_SNAKE_CASE__` while referencing `parser`/`args` — restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 707 |
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int = 100 ) -> int:
    '''Project Euler #6: difference between the square of the sum and the sum of
    squares of the first ``SCREAMING_SNAKE_CASE`` natural numbers.

    Uses the closed forms (n(n+1)/2)^2 and n(n+1)(2n+1)/6 for O(1) evaluation.
    '''
    # The original assigned both intermediates to the same name and then
    # returned two undefined variables (NameError); fixed here.
    square_of_sum = (SCREAMING_SNAKE_CASE * (SCREAMING_SNAKE_CASE + 1) // 2) ** 2
    sum_of_squares = SCREAMING_SNAKE_CASE * (SCREAMING_SNAKE_CASE + 1) * (2 * SCREAMING_SNAKE_CASE + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # The solver in this module is named UpperCAmelCase__; the original printed
    # `solution()`, which is undefined here and raised NameError.
    print(f'{UpperCAmelCase__() = }')
| 393 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    # Tiny SentencePiece fixture used to build the test tokenizer.
    lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
# Language-token ids for English / French in the M2M100 vocabulary.
# NOTE(review): the tests below reference EN_CODE and FR_CODE, but these two
# constants are both bound to `lowercase__` here — the names look mangled; the
# values correspond to en=128022 and fr=128028. Confirm the intended names.
lowercase__ = 1_2_8_0_2_2
lowercase__ = 1_2_8_0_2_8
@require_sentencepiece
class snake_case__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer-mixin test suite for MaMaaaTokenizer backed by a tiny SentencePiece fixture.

    NOTE(review): many assignment targets in this class were mangled to
    `snake_case`/`lowerCamelCase`; later lines reference the intended names
    (e.g. `save_dir`, `tokenizer`, `vocab_tokens`) — those bindings should be
    restored to their original identifiers.
    """
    lowerCamelCase = MaMaaaTokenizer
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = True
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Write a minimal vocab + spm model into tmpdirname and save a tokenizer there."""
        super().setUp()
        snake_case : int = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        snake_case : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        snake_case : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase ( self : Dict , **UpperCamelCase__ : Optional[int] ) -> Any:
        """Factory used by the common tests: reload the tokenizer from tmpdirname."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : Tuple ) -> str:
        """Return (input_text, expected_output_text) for the common round-trip tests."""
        return (
            "This is a test",
            "This is a test",
        )
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """Token '</s>' must map to id 0 and back."""
        snake_case : Optional[int] = '''</s>'''
        snake_case : str = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Check vocab ordering, its first/last entries, and total size incl. added tokens."""
        snake_case : Tuple = self.get_tokenizer()
        snake_case : Dict = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(UpperCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
    @unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def lowerCAmelCase ( self : str ) -> List[str]:
        """Intentionally skipped (hub checkpoints not uploaded yet)."""
        pass
    def lowerCAmelCase ( self : int ) -> Union[str, Any]:
        """Round-trip: text -> tokens -> ids -> tokens -> text."""
        snake_case : Optional[Any] = self.get_tokenizer()
        snake_case : Optional[Any] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2, 3, 4, 5, 6] , )
        snake_case : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        snake_case : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , '''This is a test''' )
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Integration test: compare encodings against a pinned m2m100_418M revision."""
        # fmt: off (expected encoding captured from the real checkpoint)
        snake_case : Tuple = {'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
    """Integration tests against the real facebook/m2m100_418M checkpoint:
    language-id lookup, vocab, en->fr batch encoding, save/reload, and
    source/target language switching.

    NOTE(review): assignment targets mangled to `snake_case`/`lowerCamelCase`
    throughout; later lines reference the intended names (e.g. `tokenizer`,
    `checkpoint_name`, `expected_src_tokens`, EN_CODE/FR_CODE) — restore them.
    """
    lowerCamelCase = """facebook/m2m100_418M"""
    lowerCamelCase = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    lowerCamelCase = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off (expected ids for the first English source sentence)
    lowerCamelCase = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    @classmethod
    def lowerCAmelCase ( cls : List[Any] ) -> int:
        """Download the en->fr tokenizer once for the whole test class."""
        snake_case : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        snake_case : List[str] = 1
        return cls
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Language codes must map to their fixed vocabulary ids."""
        self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_8006 )
        self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_8022 )
        self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_8076 )
        self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_8063 )
    def lowerCAmelCase ( self : Any ) -> Dict:
        """Vocab size, '<unk>' id, and presence of the English language token."""
        snake_case : List[str] = self.tokenizer.get_vocab()
        self.assertEqual(len(UpperCamelCase__ ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCamelCase__ )
    def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
        """Batch-encoding the first source sentence must reproduce the pinned ids."""
        snake_case : Dict = '''en'''
        snake_case : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
        # fmt: off
        snake_case : str = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        snake_case : int = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        snake_case : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
        self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )
    def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
        """save_pretrained / from_pretrained must preserve the lang-token mapping."""
        snake_case : Union[str, Any] = tempfile.mkdtemp()
        snake_case : Union[str, Any] = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(UpperCamelCase__ )
        snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase__ )
    @require_torch
    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """en->fr batch: language codes/EOS placement and shifted decoder inputs."""
        snake_case : Optional[Any] = '''en'''
        snake_case : int = '''fr'''
        snake_case : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors='''pt''' )
        snake_case : List[str] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            snake_case : Tuple = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def lowerCAmelCase ( self : int ) -> Union[str, Any]:
        """Changing src_lang updates the prefix language token; suffix stays EOS."""
        snake_case : List[str] = '''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        snake_case : Optional[int] = '''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    @require_torch
    def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """Switching to target mode uses tgt_lang's token; back to input mode restores src_lang."""
        snake_case : List[Any] = '''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        snake_case : List[str] = '''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
    @require_torch
    def lowerCAmelCase ( self : Tuple ) -> str:
        """_build_translation_inputs attaches the forced BOS id of the target language."""
        snake_case : int = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                # en_XX, A, test, EOS
                '''input_ids''': [[12_8022, 58, 4183, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 12_8006,
            } , )
| 638 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ = logging.get_logger(__name__)  # module-level logger
enable_full_determinism()  # make the tensor comparisons below reproducible
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-model tests for a small 3-channel UNet2D with one attention down/up block."""
    lowerCamelCase = UNetaDModel
    lowerCamelCase = """sample"""
    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Random (4, 3, 32, 32) sample plus a fixed timestep tensor on the test device."""
        snake_case : Any = 4
        snake_case : List[Any] = 3
        snake_case : str = (32, 32)
        snake_case : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        snake_case : Tuple = torch.tensor([10] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}
    @property
    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Input shape (C, H, W) expected by the model."""
        return (3, 32, 32)
    @property
    def lowerCAmelCase ( self : str ) -> Dict:
        """Output shape (C, H, W) produced by the model."""
        return (3, 32, 32)
    def lowerCAmelCase ( self : Union[str, Any] ) -> int:
        """Tiny UNet2D config (32/64 channels, attention head dim 3) plus dummy inputs."""
        snake_case : Any = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        snake_case : Dict = self.dummy_input
        return init_dict, inputs_dict
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for a 4-channel (LDM-style) UNet2D, including checkpoint loading,
    accelerate vs. normal loading parity, and a pinned-output regression check.

    NOTE(review): the ``snake_case ,snake_case : int = ...`` lines below annotate a
    tuple target, which is a SyntaxError in Python — they should read
    ``model, loading_info = UNetaDModel.from_pretrained(...)``. Assignment targets
    elsewhere were also mangled (``model``, ``image``, ``noise`` etc. are
    referenced but never bound); restore the original names.
    """
    lowerCamelCase = UNetaDModel
    lowerCamelCase = """sample"""
    @property
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Random (4, 4, 32, 32) sample plus a fixed timestep tensor."""
        snake_case : str = 4
        snake_case : Tuple = 4
        snake_case : str = (32, 32)
        snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        snake_case : str = torch.tensor([10] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}
    @property
    def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        """Input shape (C, H, W)."""
        return (4, 32, 32)
    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Output shape (C, H, W)."""
        return (4, 32, 32)
    def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Tiny 4-channel UNet2D config (no attention down/up blocks) plus dummy inputs."""
        snake_case : List[Any] = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        snake_case : str = self.dummy_input
        return init_dict, inputs_dict
    def lowerCAmelCase ( self : int ) -> str:
        """from_pretrained must report no missing keys and run a forward pass."""
        snake_case ,snake_case : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(UpperCamelCase__ )
        snake_case : List[Any] = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def lowerCAmelCase ( self : int ) -> Dict:
        """GPU-only: load from hub and run a forward pass."""
        snake_case ,snake_case : str = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        snake_case : Any = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def lowerCAmelCase ( self : Any ) -> str:
        """GPU-only: low_cpu_mem_usage (accelerate) load must match a normal load."""
        snake_case ,snake_case : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCamelCase__ )
        model_accelerate.to(UpperCamelCase__ )
        model_accelerate.eval()
        snake_case : Optional[Any] = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        snake_case : int = noise.to(UpperCamelCase__ )
        snake_case : List[str] = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
        snake_case : Union[str, Any] = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        snake_case ,snake_case : List[Any] = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
        model_normal_load.to(UpperCamelCase__ )
        model_normal_load.eval()
        snake_case : Union[str, Any] = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )['''sample''']
        assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Regression: pinned output slice for a deterministic seeded input."""
        snake_case : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(UpperCamelCase__ )
        snake_case : Any = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        snake_case : Dict = noise.to(UpperCamelCase__ )
        snake_case : Any = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
        with torch.no_grad():
            snake_case : Union[str, Any] = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        snake_case : int = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off (reference values captured from the checkpoint)
        snake_case : Tuple = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 ) )
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for the NCSN++ (score-based, Fourier time-embedding) UNet2D variant,
    including pinned output slices for the CelebA-HQ and dummy FFHQ checkpoints.

    NOTE(review): as in the class above, assignment targets were mangled to
    ``snake_case`` while later lines reference the intended names, and the
    ``snake_case ,snake_case : str = ...`` tuple-annotation line is a SyntaxError.
    """
    lowerCamelCase = UNetaDModel
    lowerCamelCase = """sample"""
    @property
    def lowerCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any]=(32, 32) ) -> Any:
        """Random (4, 3, H, W) sample plus per-batch int timestep tensor."""
        snake_case : Optional[Any] = 4
        snake_case : Tuple = 3
        snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        snake_case : str = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}
    @property
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Input shape (C, H, W)."""
        return (3, 32, 32)
    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
        """Output shape (C, H, W)."""
        return (3, 32, 32)
    def lowerCAmelCase ( self : int ) -> Optional[int]:
        """Tiny NCSN++ config: Fourier time embedding, skip-style down/up blocks."""
        snake_case : Dict = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1e-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        snake_case : Dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def lowerCAmelCase ( self : List[Any] ) -> int:
        """Slow: load google/ncsnpp-celebahq-256 and run a 256x256 forward pass."""
        snake_case ,snake_case : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(UpperCamelCase__ )
        snake_case : str = self.dummy_input
        snake_case : Dict = floats_tensor((4, 3) + (256, 256) ).to(UpperCamelCase__ )
        snake_case : Union[str, Any] = noise
        snake_case : List[Any] = model(**UpperCamelCase__ )
        assert image is not None, "Make sure output is not None"
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Slow regression: pinned output slice for the CelebA-HQ checkpoint."""
        snake_case : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(UpperCamelCase__ )
        snake_case : Dict = 4
        snake_case : Optional[int] = 3
        snake_case : Tuple = (256, 256)
        snake_case : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        snake_case : List[str] = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
        with torch.no_grad():
            snake_case : List[str] = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        snake_case : Dict = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off (reference values captured from the checkpoint)
        snake_case : Optional[int] = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Regression: pinned output slice for the dummy FFHQ VE checkpoint."""
        snake_case : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(UpperCamelCase__ )
        snake_case : Optional[Any] = 4
        snake_case : List[Any] = 3
        snake_case : Union[str, Any] = (32, 32)
        snake_case : Any = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        snake_case : Any = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
        with torch.no_grad():
            snake_case : Dict = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        snake_case : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off (reference values captured from the checkpoint)
        snake_case : int = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Placeholder overriding a common test that does not apply to this variant."""
        pass
| 638 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = (PNDMScheduler,)
__SCREAMING_SNAKE_CASE = (('''num_inference_steps''', 50),)
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**__lowerCamelCase )
return config
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step_prk(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step_plms(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step_prk(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step_plms(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step_prk(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step_plms(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
return sample
def UpperCamelCase ( self ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase,'''set_timesteps''' ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase,'''set_timesteps''' ):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(__lowerCamelCase,0,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = scheduler.step_prk(__lowerCamelCase,1,__lowerCamelCase,**__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape,sample.shape )
self.assertEqual(output_a.shape,output_a.shape )
A__ = scheduler.step_plms(__lowerCamelCase,0,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = scheduler.step_plms(__lowerCamelCase,1,__lowerCamelCase,**__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape,sample.shape )
self.assertEqual(output_a.shape,output_a.shape )
def UpperCamelCase ( self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def UpperCamelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1 )
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ),)
def UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001],[0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase,beta_end=__lowerCamelCase )
def UpperCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def UpperCamelCase ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=__lowerCamelCase )
def UpperCamelCase ( self ):
for t, num_inference_steps in zip([1, 5, 10],[10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def UpperCamelCase ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A__ = 27
for scheduler_class in self.scheduler_classes:
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A__ = scheduler.step_prk(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
def UpperCamelCase ( self ):
with self.assertRaises(__lowerCamelCase ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample,1,self.dummy_sample ).prev_sample
def UpperCamelCase ( self ):
A__ = self.full_loop()
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''' )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=__lowerCamelCase,beta_start=0.01 )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=__lowerCamelCase,beta_start=0.01 )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 718 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
# Module-level logger (mangled identifier) used by the deprecation class below.
a__: Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    """Deprecated alias of ``DPTImageProcessor`` kept for backward compatibility.

    Fixes two defects: the original signature declared ``*`` and ``**`` with
    the same mangled name (a SyntaxError), and the warning category argument
    had been replaced by that undefined name instead of ``FutureWarning``.
    """

    def __init__( self,*args,**kwargs ):
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''',FutureWarning,)
        super().__init__(*args,**kwargs )
| 212 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset at import time and split into train/test folds.
__UpperCAmelCase : Optional[int] = datasets.load_iris()
__UpperCAmelCase : Union[str, Any] = np.array(data["data"])
__UpperCAmelCase : int = np.array(data["target"])
__UpperCAmelCase : List[Any] = data["target_names"]
# NOTE(review): every assignment above binds the same mangled name, and the
# 4-way unpack below reuses it for all four targets, so only the last value
# survives; names read later (`data`, `X`, `y`, `classes`, `X_train`, ...)
# are never actually defined -- TODO restore the intended identifiers.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = train_test_split(X, y)
def lowercase_ ( point_a , point_b ):
    '''
    Return the Euclidean distance between two coordinate sequences.

    The original declared the same mangled name for both parameters, which is
    a SyntaxError (duplicate argument); they are split into two names here.
    '''
    return np.linalg.norm(np.array(point_a ) - np.array(point_b ) )
def lowercase_ ( train_data , train_target , classes , point , k=5 ):
    '''
    Classify ``point`` by a k-nearest-neighbours majority vote.

    Fixes the original's five duplicate parameter names (a SyntaxError).
    The Euclidean distance is computed inline because the module-level helper
    name was mangled away and is not resolvable from here.

    :param train_data: sequence of training feature vectors
    :param train_target: class index for each training vector
    :param classes: list of class names, indexed by class index
    :param point: the feature vector to classify
    :param k: number of neighbours to vote (default 5)
    :return: the name of the winning class
    '''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = np.linalg.norm(np.array(data_point[0] ) - np.array(point ) )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 241 |
from __future__ import annotations
def lowercase_ ( nums : list[int] ) -> int:
    '''
    Return the maximum sum of non-adjacent elements of ``nums``.

    The mangled original assigned both rolling maxima to a single name and
    then read the undefined name ``max_excluding``; this restores the classic
    include/exclude dynamic programme.

    >>> lowercase_([2, 7, 9, 3, 1])
    12
    '''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod() | 241 | 1 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_UpperCamelCase : Dict = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
# NOTE(review): an automated rename collapsed every parameter of this function
# into the single name `snake_case` -- a SyntaxError (duplicate argument) --
# and reuses `lowerCAmelCase` for every assignment target, including the
# 4-way unpack of MODEL_CLASSES, so the names read afterwards (config_class,
# model_class, pt_model_class, aws_config_map, config, tf_model, ...) are
# never defined. Comments record the visible intent; identifiers need restoring.
def snake_case ( snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Any , snake_case : List[Any] , snake_case : List[str]=False , snake_case : Any=True ) -> Tuple:
    """Convert one PyTorch checkpoint to a TensorFlow 2 checkpoint.

    Per the keyword call site at the bottom of the module, the intended
    parameters are: model_type, pytorch_checkpoint_path, config_file,
    tf_dump_path, compare_with_pt_model=False, use_cached_models=True.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
    # intended 4-way unpack: (config class, TF model class, PT model class, archive map)
    lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        lowerCAmelCase = cached_file(snake_case , snake_case , force_download=not use_cached_models )
    lowerCAmelCase = config_class.from_json_file(snake_case )
    # output hidden states / attentions so the PT-vs-TF comparison sees full outputs
    lowerCAmelCase = True
    lowerCAmelCase = True
    print(F'Building TensorFlow model from configuration: {config}' )
    lowerCAmelCase = model_class(snake_case )
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        lowerCAmelCase = cached_file(
            snake_case , snake_case , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    lowerCAmelCase = load_pytorch_checkpoint_in_tfa_model(snake_case , snake_case )
    if compare_with_pt_model:
        lowerCAmelCase = tf_model(tf_model.dummy_inputs , training=snake_case ) # build the network
        lowerCAmelCase = torch.load(snake_case , map_location='cpu' )
        lowerCAmelCase = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=snake_case , config=snake_case , state_dict=snake_case )
        with torch.no_grad():
            lowerCAmelCase = pt_model(**pt_model.dummy_inputs )
        lowerCAmelCase = pto[0].numpy()
        lowerCAmelCase = tfo[0].numpy()
        # maximum elementwise deviation between the PT and TF forward passes
        lowerCAmelCase = np.amax(np.abs(np_pt - np_tf ) )
        print(F'Max absolute difference between models outputs {diff}' )
        assert diff <= 2e-2, F'Error, model absolute difference is >2e-2: {diff}'
    # Save pytorch-model
    print(F'Save TensorFlow model to {tf_dump_path}' )
    tf_model.save_weights(snake_case , save_format='h5' )
# NOTE(review): same mangling as above -- all eight parameters collapsed into
# `snake_case` (a SyntaxError) and every assignment target collapsed into
# `lowerCAmelCase`, including the 5-way unpack of MODEL_CLASSES, so the names
# read afterwards are never defined. Per the keyword call site at module
# bottom, the intended parameters are: args_model_type, tf_dump_path,
# model_shortcut_names_or_path=None, config_shortcut_names_or_path=None,
# compare_with_pt_model=False, use_cached_models=False,
# remove_cached_files=False, only_convert_finetuned_models=False.
def snake_case ( snake_case : int , snake_case : List[Any] , snake_case : Optional[Any]=None , snake_case : Optional[int]=None , snake_case : str=False , snake_case : int=False , snake_case : Optional[Any]=False , snake_case : str=False , ) -> Dict:
    """Batch-convert PyTorch checkpoints to TensorFlow 2 checkpoints."""
    if args_model_type is None:
        lowerCAmelCase = list(MODEL_CLASSES.keys() )
    else:
        lowerCAmelCase = [args_model_type]
    for j, model_type in enumerate(snake_case , start=1 ):
        print('=' * 100 )
        print(F' Converting model type {j}/{len(snake_case )}: {model_type}' )
        print('=' * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
        # intended 5-way unpack includes both the model map and the config map
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            lowerCAmelCase = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            lowerCAmelCase = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(snake_case , snake_case ) , start=1 ):
            print('-' * 100 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(F' Skipping finetuned checkpoint {model_shortcut_name}' )
                    continue
                lowerCAmelCase = model_shortcut_name
            elif only_convert_finetuned_models:
                print(F' Skipping not finetuned checkpoint {model_shortcut_name}' )
                continue
            print(
                F' Converting checkpoint {i}/{len(snake_case )}: {model_shortcut_name} - model_type {model_type}' )
            print('-' * 100 )
            if config_shortcut_name in aws_config_map:
                lowerCAmelCase = cached_file(snake_case , snake_case , force_download=not use_cached_models )
            else:
                lowerCAmelCase = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                lowerCAmelCase = cached_file(snake_case , snake_case , force_download=not use_cached_models )
            else:
                lowerCAmelCase = model_shortcut_name
            if os.path.isfile(snake_case ):
                lowerCAmelCase = 'converted_model'
            convert_pt_checkpoint_to_tf(
                model_type=snake_case , pytorch_checkpoint_path=snake_case , config_file=snake_case , tf_dump_path=os.path.join(snake_case , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=snake_case , )
            if remove_cached_files:
                os.remove(snake_case )
                os.remove(snake_case )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
_UpperCamelCase : int = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 720 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: torch-backed model classes are only imported on
# first attribute access via _LazyModule (standard transformers pattern).
_UpperCamelCase : Any = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling symbols as well
    _UpperCamelCase : Tuple = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # static type checkers see the real imports
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): the same mangled name is reused for the import structure,
    # the torch-only list and the lazy module object, and `_import_structure`
    # below is never defined -- TODO restore distinct identifiers.
    _UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 514 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the config-archive map for GPTBigCode checkpoints.
UpperCamelCase : Tuple = logging.get_logger(__name__)
# NOTE(review): both constants bind the same mangled name, so the logger is
# overwritten by the map -- TODO restore distinct identifiers.
UpperCamelCase : Union[str, Any] = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
    """Configuration class for GPTBigCode models.

    NOTE(review): automated renaming damaged this class -- the three class
    attributes all bind `_UpperCamelCase` (only the attribute map survives),
    every `__init__` parameter collapsed into `_lowerCAmelCase`, and the body
    binds plain locals while reading undefined names (`vocab_size`,
    `n_positions`, ...) instead of assigning `self.*` attributes.
    Comments record the visible intent only.
    """
    # intended: model_type, keys_to_ignore_at_inference, attribute_map
    _UpperCamelCase = 'gpt_bigcode'
    _UpperCamelCase = ['past_key_values']
    _UpperCamelCase = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
        # intended attribute bindings (self.vocab_size = vocab_size, etc.)
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = n_positions
        lowerCamelCase__ = n_embd
        lowerCamelCase__ = n_layer
        lowerCamelCase__ = n_head
        lowerCamelCase__ = n_inner
        lowerCamelCase__ = activation_function
        lowerCamelCase__ = resid_pdrop
        lowerCamelCase__ = embd_pdrop
        lowerCamelCase__ = attn_pdrop
        lowerCamelCase__ = layer_norm_epsilon
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = scale_attn_weights
        lowerCamelCase__ = use_cache
        lowerCamelCase__ = attention_softmax_in_fpaa
        lowerCamelCase__ = scale_attention_softmax_in_fpaa
        lowerCamelCase__ = multi_query
        lowerCamelCase__ = bos_token_id
        lowerCamelCase__ = eos_token_id
        super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
| 50 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
# Expected file names inside a PhoBERT checkpoint directory.
lowercase : str = {
    '''vocab_file''': '''vocab.txt''',
    '''merges_file''': '''bpe.codes''',
}
# NOTE(review): every constant here binds the same mangled name `lowercase`,
# so earlier maps are clobbered; the tokenizer class below reads
# VOCAB_FILES_NAMES / PRETRAINED_* which are never defined in this module.
lowercase : List[str] = {
    '''vocab_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
    },
    '''merges_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
    },
}
# Model max input lengths (in tokens) per pretrained checkpoint.
lowercase : int = {
    '''vinai/phobert-base''': 2_56,
    '''vinai/phobert-large''': 2_56,
}
def lowerCAmelCase__ ( _a ):
    """Return the set of adjacent symbol pairs in the word ``_a``.

    The mangled original read an undefined name ``word``; the parameter is
    used directly instead.
    """
    pairs = set()
    prev_char = _a[0]
    for char in _a[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    # re-wrap (mirrors the upstream BPE helper, a no-op on an existing set)
    pairs = set(pairs )
    return pairs
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """PhoBERT-style BPE tokenizer (word vocab file + merge codes).

    NOTE(review): automated renaming systematically damaged this class --
    the three class attributes all bind `A`; every method's parameters
    collapse into `_SCREAMING_SNAKE_CASE` (a duplicate-argument SyntaxError
    wherever a method takes more than one); and most `snake_case_` locals
    were intended to be attributes (`self.encoder`, `self.bpe_ranks`,
    `self.cache`, ...) or distinct locals, so many names read below
    (`vocab_file`, `merges`, `token_ids_a`, `word`, `first`, `second`,
    `f`, ...) are never defined. Comments record visible intent only.
    """
    # intended: vocab_files_names, pretrained_vocab_files_map, max model input sizes
    A : str = VOCAB_FILES_NAMES
    A : List[str] = PRETRAINED_VOCAB_FILES_MAP
    A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # intended: __init__(self, vocab_file, merges_file, bos/eos/sep/cls/unk/pad/mask)
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , **_SCREAMING_SNAKE_CASE , ) -> int:
        super().__init__(
            bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        # intended: record file paths, seed encoder with <s>/<pad>/</s>/<unk> ids 0..3
        snake_case_ : List[Any] = vocab_file
        snake_case_ : Any = merges_file
        snake_case_ : Any = {}
        snake_case_ : Union[str, Any] = 0
        snake_case_ : Union[str, Any] = 1
        snake_case_ : Optional[int] = 2
        snake_case_ : Optional[int] = 3
        self.add_from_file(_SCREAMING_SNAKE_CASE )
        # decoder is the inverse of the encoder mapping
        snake_case_ : Tuple = {v: k for k, v in self.encoder.items()}
        with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
            snake_case_ : List[Any] = merges_handle.read().split("\n" )[:-1]
        snake_case_ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
        # rank table: earlier merges have higher priority (lower rank)
        snake_case_ : List[str] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
        snake_case_ : Optional[Any] = {}
    # intended: build_inputs_with_special_tokens -> <s> A </s> (</s> B </s>)
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        snake_case_ : Optional[Any] = [self.cls_token_id]
        snake_case_ : int = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # intended: get_special_tokens_mask (1 for special tokens, 0 for sequence tokens)
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
        if token_ids_a is None:
            return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
        return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
    # intended: create_token_type_ids_from_sequences (PhoBERT uses all zeros)
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # intended: vocab_size property
    @property
    def _lowerCAmelCase ( self ) -> Dict:
        return len(self.encoder )
    # intended: get_vocab (encoder plus added tokens)
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        return dict(self.encoder , **self.added_tokens_encoder )
    # intended: bpe(token) -- iteratively merge the highest-priority adjacent pair
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        if token in self.cache:
            return self.cache[token]
        snake_case_ : List[Any] = tuple(_SCREAMING_SNAKE_CASE )
        # append the end-of-word marker to the last symbol
        snake_case_ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        snake_case_ : str = get_pairs(_SCREAMING_SNAKE_CASE )
        if not pairs:
            return token
        while True:
            # pick the pair with the lowest merge rank still present
            snake_case_ : List[str] = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            snake_case_ , snake_case_ : Dict = bigram
            snake_case_ : Any = []
            snake_case_ : Any = 0
            while i < len(_SCREAMING_SNAKE_CASE ):
                try:
                    snake_case_ : List[str] = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    snake_case_ : Optional[Any] = j
                if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            snake_case_ : Union[str, Any] = tuple(_SCREAMING_SNAKE_CASE )
            snake_case_ : List[str] = new_word
            if len(_SCREAMING_SNAKE_CASE ) == 1:
                break
            else:
                snake_case_ : List[Any] = get_pairs(_SCREAMING_SNAKE_CASE )
        # join subwords with the "@@ " continuation marker and drop trailing "</w>"
        snake_case_ : Dict = "@@ ".join(_SCREAMING_SNAKE_CASE )
        snake_case_ : Tuple = word[:-4]
        snake_case_ : Union[str, Any] = word
        return word
    # intended: _tokenize(text) -- whitespace split then BPE each token
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
        snake_case_ : List[Any] = []
        snake_case_ : str = re.findall(r"\S+\n?" , _SCREAMING_SNAKE_CASE )
        for token in words:
            split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(" " ) ) )
        return split_tokens
    # intended: _convert_token_to_id (unknown tokens map to unk)
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
        return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
    # intended: _convert_id_to_token
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
        return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
    # intended: convert_tokens_to_string -- undo the "@@ " continuation markers
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        snake_case_ : Any = " ".join(_SCREAMING_SNAKE_CASE ).replace("@@ " , "" ).strip()
        return out_string
    # intended: save_vocabulary(save_directory, filename_prefix) -> (vocab, merges) paths
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
        if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : Optional[int] = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Dict = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
            copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
            copyfile(self.merges_file , _SCREAMING_SNAKE_CASE )
        return out_vocab_file, out_merge_file
    # intended: add_from_file(f) -- load "<token> <count>" lines into the encoder
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            try:
                with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(_SCREAMING_SNAKE_CASE )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        snake_case_ : Dict = f.readlines()
        for lineTmp in lines:
            snake_case_ : Tuple = lineTmp.strip()
            snake_case_ : Dict = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            snake_case_ : Optional[Any] = line[:idx]
            # intended: self.encoder[word] = len(self.encoder)
            snake_case_ : List[Any] = len(self.encoder )
| 568 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Keys whose values are averaged across tensor-parallel ranks when merging.
_snake_case = [
    '''word_embeddings_layernorm.weight''',
    '''word_embeddings_layernorm.bias''',
    '''input_layernorm.weight''',
    '''input_layernorm.bias''',
    '''post_attention_layernorm.weight''',
    '''post_attention_layernorm.bias''',
    '''self_attention.dense.bias''',
    '''mlp.dense_4h_to_h.bias''',
    '''ln_f.weight''',
    '''ln_f.bias''',
]
# NOTE(review): both lists bind the same mangled name `_snake_case`, so the
# first one is clobbered; the merge code below reads
# WEIGHTS_TO_AVERAGE_ENDSWITH / WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN which are
# never defined in this module -- TODO restore the intended identifiers.
_snake_case = [
    '''mlp.dense_4h_to_h.weight''',
    '''self_attention.dense.weight''',
]
def __lowerCamelCase ( key , file ) -> str:
    """Map a Megatron-DeepSpeed state-dict key to the transformers layout.

    The original declared both parameters with one mangled name (a
    SyntaxError). They are restored as ``key`` (the state-dict key) and
    ``file`` (the shard file name carrying the layer number) — the upstream
    BLOOM conversion script extracts the layer index from the file name.
    """
    # First/last layers have fixed renames
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: shard files are named "...layer_<n>...";
    # the first 3 Megatron layers are embedding/norm bookkeeping, hence -3.
    layer_number = int(re.match(r'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return F'h.{layer_number}.' + key
def __lowerCamelCase ( _lowercase ):
    """Return the size in bytes of one element of the torch dtype ``_lowercase``.

    The original body read an undefined name ``dtype``; the parameter is used
    instead. ``torch.bool`` is bit-packed, hence the fractional 1/8.
    """
    if _lowercase == torch.bool:
        return 1 / 8
    # extract the trailing bit-width from the dtype's string form, e.g. "torch.float16" -> 16
    bit_search = re.search(r'[^\d](\d+)$' , str(_lowercase ) )
    if bit_search is None:
        raise ValueError(F'`dtype` is not a valid dtype: {_lowercase}.' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
    """Convert a Megatron-DeepSpeed BLOOM checkpoint to a transformers PyTorch
    checkpoint, either sharded (one ``pytorch_model_XXXXX-of-YYYYY.bin`` per
    layer file plus an index) or as a single merged ``state_dict``.

    NOTE(review): this block was mangled by an automated rename pass — the five
    parameters share one name (a SyntaxError in Python) and most assignment
    targets were collapsed to ``UpperCamelCase`` while the code still reads the
    original locals (``bloom_config_file``, ``shard_model``, ``tensors``,
    ``temp``, ``config`` ...). The intended parameters, judging from the call
    at the bottom of the file, are (bloom_checkpoint_path, bloom_config_file,
    pytorch_dump_folder_path, shard_model, pretraining_tp). Restore the
    original names before running; comments below describe the intended flow.
    """
    # Construct model
    if bloom_config_file == "":
        UpperCamelCase = BloomConfig()
    else:
        UpperCamelCase = BloomConfig.from_json_file(_lowercase )
    if shard_model:
        # Sharded path: convert each "layer_XX" group of TP files into one
        # output shard and build a weight_map index as we go.
        UpperCamelCase = os.listdir(_lowercase )
        UpperCamelCase = sorted(filter(lambda _lowercase : s.startswith('layer' ) and "model_00" in s , _lowercase ) )
        UpperCamelCase = {'weight_map': {}, 'metadata': {}}
        UpperCamelCase = 0
        UpperCamelCase = None
        UpperCamelCase = BloomConfig()
        for j, file in enumerate(_lowercase ):
            print('Processing file: {}'.format(_lowercase ) )
            UpperCamelCase = None
            for i in range(_lowercase ):
                # load all TP files
                UpperCamelCase = file.replace('model_00' , F'model_0{i}' )
                UpperCamelCase = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='cpu' )
                # Rename keys in the transformers names
                UpperCamelCase = list(temp.keys() )
                for key in keys:
                    UpperCamelCase = temp.pop(_lowercase )
                if tensors is None:
                    UpperCamelCase = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            UpperCamelCase = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    UpperCamelCase = tensors[key] / pretraining_tp
            torch.save(
                _lowercase , os.path.join(
                    _lowercase , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
            # Account for the shard in the index: total byte size plus a
            # weight_map entry pointing each key at its shard file.
            for key in tensors.keys():
                UpperCamelCase = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    UpperCamelCase = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
        # Write the config and the ".index.json" shard index next to the shards.
        UpperCamelCase = BloomConfig()
        UpperCamelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
        UpperCamelCase = total_size
        with open(_lowercase , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(_lowercase , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
            UpperCamelCase = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '\n'
            f.write(_lowercase )
    else:
        # Merged path: load every TP shard into one BloomModel and save a single
        # weights file plus the config.
        UpperCamelCase = BloomModel(_lowercase )
        UpperCamelCase = os.listdir(_lowercase )
        UpperCamelCase = sorted(filter(lambda _lowercase : s.startswith('layer' ) and "model_00" in s , _lowercase ) )
        UpperCamelCase = None
        for i, file in enumerate(_lowercase ):
            UpperCamelCase = None
            for i in range(_lowercase ):
                # load all TP files
                UpperCamelCase = file.replace('model_00' , F'model_0{i}' )
                UpperCamelCase = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='cpu' )
                # Rename keys in the transformers names
                UpperCamelCase = list(temp.keys() )
                for key in keys:
                    UpperCamelCase = temp.pop(_lowercase )
                if tensors is None:
                    UpperCamelCase = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            UpperCamelCase = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    UpperCamelCase = tensors[key] / pretraining_tp
            # Load each layer group non-strictly and track which keys are still
            # missing; by the end nothing may remain missing or unexpected.
            UpperCamelCase = model.load_state_dict(_lowercase , strict=_lowercase )
            assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
            if missing_keys is None:
                UpperCamelCase = set(other_keys.missing_keys )
            else:
                UpperCamelCase = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, F'The keys {missing_keys} are missing'
        # Save pytorch-model
        os.makedirs(_lowercase , exist_ok=_lowercase )
        UpperCamelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        UpperCamelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
        if config.torch_dtype is not None:
            UpperCamelCase = model.to(config.torch_dtype )
        torch.save(model.state_dict() , _lowercase )
        print(F'Save configuration file to {pytorch_config_dump_path}' )
        with open(_lowercase , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
# Script entry point: parse CLI options and run the BLOOM checkpoint converter.
# NOTE(review): the obfuscation left several unresolved names here — the parser
# is assigned to `_snake_case` but used as `parser`, the parsed namespace is
# used as `args`, and `convert_bloom_checkpoint_to_pytorch` is not defined
# above (the converter was left bound to an obfuscated name). Verify/restore
# the original bindings before running.
if __name__ == "__main__":
    _snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bloom_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the Megatron-LM checkpoint path.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--bloom_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--shard_model''',
        action='''store_true''',
        help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
    )
    parser.add_argument(
        '''--pretraining_tp''',
        default=4,
        type=int,
        help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
    )
    _snake_case = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 707 |
def climb_stairs(number_of_steps: int) -> int:
    """Return the number of distinct ways to climb ``number_of_steps`` stairs
    when each move climbs either 1 or 2 steps (a Fibonacci recurrence).

    Fixes the original block: the parameter had been renamed ``_lowercase``
    while the body read the undefined names ``number_of_steps``/``current``,
    and the ``isinstance`` call compared the value against itself.

    Raises:
        AssertionError: if ``number_of_steps`` is not a positive int (kept as
            an ``assert`` to preserve the original exception type).
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        # Fibonacci step: ways(n) = ways(n - 1) + ways(n - 2).
        current, previous = current + previous, current
    return current


# Backward-compatible rebinding of the old (obfuscated) name.
__lowerCamelCase = climb_stairs

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 170 | 0 |
from math import isqrt, loga
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return every prime strictly below ``max_number`` (sieve of Eratosthenes).

    Fixes the original block: its parameter was renamed ``__A`` while the body
    read the undefined name ``max_number``, the inner sieve range reused ``__A``
    for both stop and step, and the ``is_prime[j] = False`` target was lost.
    """
    # Local import: the module-level `from math import isqrt, loga` is broken
    # (math has no `loga`), so keep this function self-sufficient.
    from math import isqrt

    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Every multiple of a prime, from i*i upward, is composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


# Backward-compatible rebinding of the old (obfuscated) name.
SCREAMING_SNAKE_CASE_ = calculate_prime_numbers
def solution(base: int = 80_08_00, degree: int = 80_08_00) -> int:
    """Project Euler 800: count hybrid-integers ``p**q * q**p <= base**degree``
    for distinct primes ``p < q`` (comparison done in log2 space).

    Fixes the original block: both parameters shared the name ``__A`` (a
    SyntaxError) while the body read the undefined names ``degree`` and
    ``upper_bound``, and the broken module import of ``loga`` is replaced by a
    local ``log2`` import.
    """
    from math import log2  # module-level `loga` import is broken; see above

    # p**q * q**p <= base**degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base)
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each smallest prime `left`, shrink `right` until
    # the pair satisfies the bound; every prime in between also qualifies.
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


# Backward-compatible rebinding of the old (obfuscated) name.
SCREAMING_SNAKE_CASE_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 570 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with ``num_rows`` rows, centred by spaces.

    Fixes the original block, whose parameter was renamed ``lowercase`` while
    the body still read the undefined name ``num_rows``.
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces so each row is centred under the apex.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values; no trailing space after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


# Backward-compatible rebinding of the old (obfuscated) name.
A_ = print_pascal_triangle
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows, one list per row.

    Fixes the original block: ``isinstance(lowercase, lowercase)`` compared the
    value against itself (a TypeError for any non-type input) and the body read
    the undefined name ``num_rows``.

    Raises:
        TypeError: if ``num_rows`` is not an ``int``.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


# Backward-compatible rebinding of the old (obfuscated) name.
A_ = generate_pascal_triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row ``current_row_idx`` of Pascal's triangle from the rows above it.

    Fixes the original block: the unpacking target for the edge 1s and the loop
    bound had been destroyed by the automated rename.
    """
    current_row = [-1] * (current_row_idx + 1)
    # First and last elements of every row are 1.
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


# Backward-compatible rebinding of the old (obfuscated) name.
A_ = populate_current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set one interior element of ``current_row`` in place: the sum of the two
    elements directly above it in the previous row.

    Fixes the original block, whose write into ``current_row`` had been
    collapsed to a throwaway local by the automated rename.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


# Backward-compatible rebinding of the old (obfuscated) name.
A_ = calculate_current_element
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting row symmetry: only the distinct
    first half of each row is computed, then mirrored.

    Fixes the original block's self-``isinstance`` check and the undefined
    ``num_rows``/``row_length``/``row_to_append`` references.

    Raises:
        TypeError: if ``num_rows`` is not an ``int``.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so pairwise sums give the next row.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result


# Backward-compatible rebinding of the old (obfuscated) name.
A_ = generate_pascal_triangle_optimized
def benchmark() -> None:
    """Time both triangle generators over a range of input sizes and print the
    results (uses ``timeit`` against ``__main__``, so it only makes sense when
    this module is run as a script).

    Fixes the original block's undefined locals (``func``, ``value``, ``call``,
    ``timing``) left behind by the automated rename.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # One-line label like "generate_pascal_triangle(7)".
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


# Backward-compatible rebinding of the old (obfuscated) name.
A_ = benchmark
# Script entry point: run the module doctests, then the generator benchmark.
# NOTE(review): `benchmark` is not bound above in the original (all defs were
# renamed to `A_`), so this call would raise NameError until names are restored.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 470 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A__ ( unittest.TestCase ):
    """Test suite for ``Wav2Vec2ProcessorWithLM`` — a processor bundling a CTC
    tokenizer, a feature extractor and a pyctcdecode beam-search decoder.

    NOTE(review): an automated rename pass collapsed every method name to
    ``__magic_name__`` (so later defs shadow earlier ones in the class
    namespace, and one signature even repeats the parameter name ``A_`` — a
    SyntaxError) and most assignment targets to ``_lowerCAmelCase``, while the
    bodies still read the original locals (``tokenizer``, ``processor``, ...)
    or the undefined name ``A_``. The original ``setUp``/``test_*`` names must
    be restored before this suite can run; the docstrings below record each
    method's intent.
    """
    def __magic_name__ ( self : Union[str, Any] ):
        '''Create a toy vocab + feature-extractor config in a temp dir (originally ``setUp``).'''
        _lowerCAmelCase : Dict = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        _lowerCAmelCase : int = dict(zip(A_ , range(len(A_ ) ) ) )
        _lowerCAmelCase : int = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        _lowerCAmelCase : Any = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 1_6_0_0_0,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        _lowerCAmelCase : int = tempfile.mkdtemp()
        _lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , A_ )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A_ ) + "\n" )
        with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A_ ) + "\n" )
        # load decoder from hub
        _lowerCAmelCase : Union[str, Any] = "hf-internal-testing/ngram-beam-search-decoder"
    def __magic_name__ ( self : Tuple , **A_ : Optional[int] ):
        '''Instantiate a Wav2Vec2CTCTokenizer from the temp dir, merging default kwargs.'''
        _lowerCAmelCase : str = self.add_kwargs_tokens_map.copy()
        kwargs.update(A_ )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
    def __magic_name__ ( self : Tuple , **A_ : Tuple ):
        '''Instantiate a Wav2Vec2FeatureExtractor from the temp dir.'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
    def __magic_name__ ( self : str , **A_ : Tuple ):
        '''Load the beam-search decoder fixture from the hub.'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
    def __magic_name__ ( self : Any ):
        '''Remove the temp dir (originally ``tearDown``).'''
        shutil.rmtree(self.tmpdirname )
    def __magic_name__ ( self : Dict ):
        '''save_pretrained/from_pretrained round-trips tokenizer, feature extractor and decoder.'''
        _lowerCAmelCase : Tuple = self.get_tokenizer()
        _lowerCAmelCase : str = self.get_feature_extractor()
        _lowerCAmelCase : Optional[Any] = self.get_decoder()
        _lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        processor.save_pretrained(self.tmpdirname )
        _lowerCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A_ )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , A_ )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , A_ )
    def __magic_name__ ( self : Tuple ):
        '''from_pretrained forwards decoder kwargs (alpha, beta, ...) to the language model.'''
        _lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        _lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )
    def __magic_name__ ( self : str ):
        '''Constructor raises when the tokenizer holds tokens absent from the decoder alphabet.'''
        _lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"] )
        with self.assertRaisesRegex(A_ , "include" ):
            WavaVecaProcessorWithLM(
                tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
    def __magic_name__ ( self : List[str] ):
        '''Processor called with raw audio matches the feature extractor output.'''
        _lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
        _lowerCAmelCase : Dict = self.get_tokenizer()
        _lowerCAmelCase : Tuple = self.get_decoder()
        _lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        _lowerCAmelCase : int = floats_list((3, 1_0_0_0) )
        _lowerCAmelCase : Tuple = feature_extractor(A_ , return_tensors="np" )
        _lowerCAmelCase : Any = processor(A_ , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def __magic_name__ ( self : List[Any] ):
        '''Processor called with text matches the tokenizer output.'''
        _lowerCAmelCase : Tuple = self.get_feature_extractor()
        _lowerCAmelCase : Optional[int] = self.get_tokenizer()
        _lowerCAmelCase : List[str] = self.get_decoder()
        _lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        _lowerCAmelCase : int = "This is a test string"
        _lowerCAmelCase : List[str] = processor(text=A_ )
        _lowerCAmelCase : int = tokenizer(A_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __magic_name__ ( self : Optional[int] , A_ : Tuple=(2, 1_0, 1_6) , A_ : str=7_7 ):
        '''Deterministic random logits used as decoder input by the tests below.'''
        np.random.seed(A_ )
        return np.random.rand(*A_ )
    def __magic_name__ ( self : List[str] ):
        '''decode() matches pyctcdecode's decode_beams on a single example.'''
        _lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
        _lowerCAmelCase : Dict = self.get_tokenizer()
        _lowerCAmelCase : List[str] = self.get_decoder()
        _lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        _lowerCAmelCase : Tuple = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
        _lowerCAmelCase : List[Any] = processor.decode(A_ )
        _lowerCAmelCase : Any = decoder.decode_beams(A_ )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual("</s> <s> </s>" , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
    @parameterized.expand([[None], ["fork"], ["spawn"]] )
    def __magic_name__ ( self : Optional[int] , A_ : Union[str, Any] ):
        '''batch_decode() matches decode_beams_batch, with and without a user-supplied pool.'''
        _lowerCAmelCase : str = self.get_feature_extractor()
        _lowerCAmelCase : Optional[int] = self.get_tokenizer()
        _lowerCAmelCase : Any = self.get_decoder()
        _lowerCAmelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        _lowerCAmelCase : int = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            _lowerCAmelCase : int = processor.batch_decode(A_ )
        else:
            with get_context(A_ ).Pool() as pool:
                _lowerCAmelCase : List[Any] = processor.batch_decode(A_ , A_ )
        _lowerCAmelCase : List[Any] = list(A_ )
        with get_context("fork" ).Pool() as p:
            _lowerCAmelCase : Any = decoder.decode_beams_batch(A_ , A_ )
        _lowerCAmelCase : Optional[Any] = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(A_ , decoded_processor.text )
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
        self.assertListEqual(A_ , decoded_processor.logit_score )
        self.assertListEqual(A_ , decoded_processor.lm_score )
    def __magic_name__ ( self : Union[str, Any] ):
        '''batch_decode() forwards beam-search pruning parameters to pyctcdecode.'''
        _lowerCAmelCase : List[str] = self.get_feature_extractor()
        _lowerCAmelCase : str = self.get_tokenizer()
        _lowerCAmelCase : Tuple = self.get_decoder()
        _lowerCAmelCase : int = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        _lowerCAmelCase : List[Any] = self._get_dummy_logits()
        _lowerCAmelCase : Optional[Any] = 1_5
        _lowerCAmelCase : str = -20.0
        _lowerCAmelCase : Any = -4.0
        _lowerCAmelCase : int = processor.batch_decode(
            A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
        _lowerCAmelCase : Tuple = decoded_processor_out.text
        _lowerCAmelCase : Optional[Any] = list(A_ )
        with get_context("fork" ).Pool() as pool:
            _lowerCAmelCase : Any = decoder.decode_beams_batch(
                A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
        _lowerCAmelCase : List[Any] = [d[0][0] for d in decoded_decoder_out]
        _lowerCAmelCase : Tuple = [d[0][2] for d in decoded_decoder_out]
        _lowerCAmelCase : str = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(A_ , A_ )
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , A_ )
        self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
        self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
    def __magic_name__ ( self : str ):
        '''batch_decode() forwards LM parameters (alpha, beta, offsets) to pyctcdecode.'''
        _lowerCAmelCase : Any = self.get_feature_extractor()
        _lowerCAmelCase : str = self.get_tokenizer()
        _lowerCAmelCase : int = self.get_decoder()
        _lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        _lowerCAmelCase : Any = self._get_dummy_logits()
        _lowerCAmelCase : int = 2.0
        _lowerCAmelCase : int = 5.0
        _lowerCAmelCase : Tuple = -20.0
        _lowerCAmelCase : List[str] = True
        _lowerCAmelCase : List[str] = processor.batch_decode(
            A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
        _lowerCAmelCase : List[Any] = decoded_processor_out.text
        _lowerCAmelCase : Optional[int] = list(A_ )
        decoder.reset_params(
            alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
        with get_context("fork" ).Pool() as pool:
            _lowerCAmelCase : List[Any] = decoder.decode_beams_batch(
                A_ , A_ , )
        _lowerCAmelCase : str = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(A_ , A_ )
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , A_ )
        _lowerCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , A_ )
    def __magic_name__ ( self : str ):
        '''from_pretrained downloads only decoder-relevant files from the hub.'''
        _lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        _lowerCAmelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
        _lowerCAmelCase : str = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
        _lowerCAmelCase : Any = os.listdir(A_ )
        _lowerCAmelCase : List[str] = ["alphabet.json", "language_model"]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(A_ , A_ )
    def __magic_name__ ( self : str ):
        '''from_pretrained on a local snapshot uses the same decoder files as the hub.'''
        _lowerCAmelCase : List[str] = snapshot_download("hf-internal-testing/processor_with_lm" )
        _lowerCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained(A_ )
        _lowerCAmelCase : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
        _lowerCAmelCase : int = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
        _lowerCAmelCase : Optional[Any] = os.listdir(A_ )
        _lowerCAmelCase : Optional[Any] = os.listdir(A_ )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(A_ , A_ )
    def __magic_name__ ( self : Any ):
        '''AutoProcessor resolves to Wav2Vec2ProcessorWithLM and behaves identically.'''
        _lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        _lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
        _lowerCAmelCase : Any = floats_list((3, 1_0_0_0) )
        _lowerCAmelCase : Optional[int] = processor_wavaveca(A_ , return_tensors="np" )
        _lowerCAmelCase : Optional[Any] = processor_auto(A_ , return_tensors="np" )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
        _lowerCAmelCase : Any = self._get_dummy_logits()
        _lowerCAmelCase : Any = processor_wavaveca.batch_decode(A_ )
        _lowerCAmelCase : Union[str, Any] = processor_auto.batch_decode(A_ )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    def __magic_name__ ( self : int ):
        '''model_input_names mirrors the feature extractor's input names.'''
        _lowerCAmelCase : List[Any] = self.get_feature_extractor()
        _lowerCAmelCase : Tuple = self.get_tokenizer()
        _lowerCAmelCase : str = self.get_decoder()
        _lowerCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
    @staticmethod
    def __magic_name__ ( A_ : Union[str, Any] , A_ : Optional[int] ):
        '''Collect one key from every offset dict in a list of offsets.'''
        _lowerCAmelCase : Any = [d[key] for d in offsets]
        return retrieved_list
    def __magic_name__ ( self : Optional[int] ):
        '''decode(output_word_offsets=True) returns word offsets for one example.'''
        _lowerCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        _lowerCAmelCase : Dict = self._get_dummy_logits()[0]
        _lowerCAmelCase : Union[str, Any] = processor.decode(A_ , output_word_offsets=A_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue("text" in outputs )
        self.assertTrue("word_offsets" in outputs )
        self.assertTrue(isinstance(A_ , A_ ) )
        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
    def __magic_name__ ( self : str ):
        '''batch_decode(output_word_offsets=True) returns word offsets per example.'''
        _lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        _lowerCAmelCase : Union[str, Any] = self._get_dummy_logits()
        _lowerCAmelCase : Union[str, Any] = processor.batch_decode(A_ , output_word_offsets=A_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue("text" in outputs )
        self.assertTrue("word_offsets" in outputs )
        self.assertTrue(isinstance(A_ , A_ ) )
        self.assertListEqual(
            [" ".join(self.get_from_offsets(A_ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
    @slow
    @require_torch
    @require_torchaudio
    def __magic_name__ ( self : int ):
        '''Integration test: word-level time stamps against a real model and dataset.'''
        import torch
        _lowerCAmelCase : Any = load_dataset("common_voice" , "en" , split="train" , streaming=A_ )
        _lowerCAmelCase : Optional[int] = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
        _lowerCAmelCase : Union[str, Any] = iter(A_ )
        _lowerCAmelCase : Optional[int] = next(A_ )
        _lowerCAmelCase : str = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
        _lowerCAmelCase : List[str] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        _lowerCAmelCase : List[Any] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
        with torch.no_grad():
            _lowerCAmelCase : List[str] = model(A_ ).logits.cpu().numpy()
        _lowerCAmelCase : List[Any] = processor.decode(logits[0] , output_word_offsets=A_ )
        # Convert frame offsets to seconds via the model's input-to-logits ratio.
        _lowerCAmelCase : Union[str, Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        _lowerCAmelCase : Optional[Any] = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        _lowerCAmelCase : Optional[int] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(A_ , "word" ) ) , A_ )
        self.assertEqual(" ".join(self.get_from_offsets(A_ , "word" ) ) , output.text )
        # output times
        _lowerCAmelCase : Dict = torch.tensor(self.get_from_offsets(A_ , "start_time" ) )
        _lowerCAmelCase : Tuple = torch.tensor(self.get_from_offsets(A_ , "end_time" ) )
        # fmt: off
        _lowerCAmelCase : List[str] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        _lowerCAmelCase : Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
        self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
| 721 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class A__ ( A ):
    """Benchmark arguments for TensorFlow runs, extending ``BenchmarkArguments``
    with TPU/GPU device selection, eager-mode and XLA options.

    NOTE(review): automated renaming collapsed the class attribute names to
    ``_lowercase`` (so the later field declarations shadow the earlier ones and
    ``self.deprecated_args`` / ``self.tpu_name`` etc. won't resolve), every
    method name to ``__magic_name__`` (the properties below shadow each other,
    and references like ``self._setup_tpu`` / ``self.is_tpu`` / ``self.gpu_list``
    point at the original property names), and several locals inside
    ``__init__``. The original identifiers must be restored before use; the
    docstrings below record each member's intent.
    """
    _lowercase : List[Any] = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]
    def __init__( self : List[Any] , **A_ : str ):
        '''Translate deprecated ``no_*`` kwargs into their positive form (with a
        warning), pop the TF-specific kwargs, then defer to the base class.'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the "no_" prefix and invert the flag's meaning.
                _lowerCAmelCase : str = deprecated_arg[3:]
                _lowerCAmelCase : List[Any] = not kwargs.pop(A_ )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        _lowerCAmelCase : Any = kwargs.pop("tpu_name" , self.tpu_name )
        _lowerCAmelCase : str = kwargs.pop("device_idx" , self.device_idx )
        _lowerCAmelCase : str = kwargs.pop("eager_mode" , self.eager_mode )
        _lowerCAmelCase : Tuple = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**A_ )
    # NOTE(review): dataclass fields declared after __init__ — unusual ordering
    # kept from the original; the custom __init__ above overrides the generated one.
    _lowercase : str = field(
        default=A , metadata={'''help''': '''Name of TPU'''} , )
    _lowercase : int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    _lowercase : bool = field(default=A , metadata={'''help''': '''Benchmark models in eager model.'''} )
    _lowercase : bool = field(
        default=A , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
        } , )
    @cached_property
    def __magic_name__ ( self : Tuple ):
        '''Resolve the TPU cluster (by name if given), or None when unavailable.'''
        requires_backends(self , ["tf"] )
        _lowerCAmelCase : Any = None
        if self.tpu:
            try:
                if self.tpu_name:
                    _lowerCAmelCase : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    _lowerCAmelCase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                _lowerCAmelCase : Optional[int] = None
        return tpu
    @cached_property
    def __magic_name__ ( self : Optional[int] ):
        '''Build the tf.distribute strategy: TPUStrategy, or a OneDeviceStrategy
        pinned to the selected GPU/CPU.'''
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            _lowerCAmelCase : List[Any] = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                _lowerCAmelCase : Dict = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                _lowerCAmelCase : Optional[Any] = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def __magic_name__ ( self : int ):
        '''True when a TPU cluster was resolved.'''
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None
    @property
    def __magic_name__ ( self : List[Any] ):
        '''The cached tf.distribute strategy.'''
        requires_backends(self , ["tf"] )
        return self._setup_strategy
    @property
    def __magic_name__ ( self : List[str] ):
        '''Physical GPU devices visible to TensorFlow.'''
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )
    @property
    def __magic_name__ ( self : Optional[Any] ):
        '''Number of usable GPUs (0 when CUDA is disabled).'''
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def __magic_name__ ( self : Optional[Any] ):
        '''True when at least one GPU is usable.'''
        return self.n_gpu > 0
| 503 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data to use for MAE pre-training and eval.

    NOTE(review): class and field names restored from the references inside
    `main` (`data_args.dataset_name`, `data_args.data_files`, …); the
    obfuscated original gave every field the same name.
    """

    dataset_name: Optional[str] = field(
        default='''cifar10''', metadata={'''help''': '''Name of a dataset from the datasets package'''}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The column name of the images in the files.'''}
    )
    train_dir: Optional[str] = field(default=None, metadata={'''help''': '''A folder containing the training data.'''})
    validation_dir: Optional[str] = field(default=None, metadata={'''help''': '''A folder containing the validation data.'''})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'''help''': '''Percent to split off of train for validation.'''}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping consumed by `load_dataset` in `main`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image-processor to pre-train.

    NOTE(review): class and field names restored from the references inside
    `main` (`model_args.model_name_or_path`, `model_args.mask_ratio`, …).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''}
    )
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={'''help''': '''Name or path of preprocessor config.'''})
    use_auth_token: bool = field(
        default=False,
        metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''}
    )
    # NOTE(review): default restored from the upstream run_mae script — confirm.
    norm_pix_loss: bool = field(
        default=True, metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments plus a base learning rate scaled by total batch size.

    NOTE(review): class name restored from the `HfArgumentParser` call in
    `main`; the field name is read as `training_args.base_learning_rate`.
    """

    base_learning_rate: float = field(
        default=1e-3, metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''}
    )
def collate_fn(examples):
    """Collate a list of dataset examples into a ViTMAE batch.

    Each example is a mapping containing a ``pixel_values`` tensor; they are
    stacked along a new leading batch dimension. The original body referenced
    an undefined name (`examples`) instead of its parameter, and returned an
    undefined `pixel_values`; both are fixed here.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Pre-train ViTMAE: parse arguments, load the dataset, build the model and
    image processor, then train and evaluate with the HF Trainer.

    NOTE(review): local variable names restored from their later real-name
    uses in the obfuscated original (`parser`, `ds`, `config`, `model`, …),
    which previously raised NameError; function renamed to `main` to match the
    `main()` call under the `__main__` guard.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome."""
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch."""
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split)
        ds["train"] = split["""train"""]
        ds["validation"] = split["""test"""]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("""You are instantiating a new config instance from scratch.""")
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')
    # adapt config
    config.update(
        {
            """mask_ratio""": model_args.mask_ratio,
            """norm_pix_loss""": model_args.norm_pix_loss,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("""Training new model from scratch""")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["""shortest_edge"""]
    else:
        size = (image_processor.size["""height"""], image_processor.size["""width"""])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("""RGB""") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train transforms to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    def collate_fn(examples):
        """Stack per-example pixel_values into a single batch tensor."""
        # Defined locally so this function is self-contained.
        pixel_values = torch.stack([example["pixel_values"] for example in examples])
        return {"pixel_values": pixel_values}

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["""train"""].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["""validation"""].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["""train"""] if training_args.do_train else None,
        eval_dataset=ds["""validation"""] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("""train""", train_result.metrics)
        trainer.save_metrics("""train""", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""", metrics)
        trainer.save_metrics("""eval""", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU process spawning (xla_spawn.py); `index` is the
    process index and is unused.

    NOTE(review): the obfuscated original shadowed `main` and called an
    undefined name; this assumes the training entry point is named `main`.
    """
    main()


if __name__ == "__main__":
    main()
| 88 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
# NOTE(review): constant names restored from the class-body references below
# (vocab_files_names, etc.); in the obfuscated original every constant was
# assigned to the same name, so each overwrote the previous one.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
        '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
    },
}

# Maximum input sizes (in tokens) for the pretrained ESM-2 checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/esm2_t6_8M_UR50D''': 10_24,
    '''facebook/esm2_t12_35M_UR50D''': 10_24,
}
def load_vocab_file(_UpperCamelCase):
    """Read a vocabulary file and return its lines stripped of whitespace.

    Renamed to match the `load_vocab_file(...)` call in the tokenizer's
    __init__ below; the original body assigned the lines to a placeholder but
    returned an undefined `lines`. Reads as UTF-8 explicitly.
    """
    with open(_UpperCamelCase, '''r''', encoding="utf-8") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __SCREAMING_SNAKE_CASE ( A__ ):
    """Character-level tokenizer for ESM-2 protein language models.

    NOTE(review): method and class-attribute names restored to the
    PreTrainedTokenizer API; the obfuscated original gave every method the
    same name, so they shadowed each other and the tokenizer could not work.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        # Every vocab entry is treated as an unsplittable token.
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # Protein sequences are split on whitespace only.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''')
        return cls + token_ids_a + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_a, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        # 1 marks a special token (cls/eos), 0 marks a sequence token.
        mask = [1] + ([0] * len(token_ids_a)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''')
        with open(vocab_file, '''w''') as f:
            f.write('''\n'''.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        # Fixed: the original passed an undefined placeholder name here.
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 319 | 0 |
'''simple docstring'''
class Graph:
    """Directed graph stored as an adjacency dict, with recursive DFS.

    NOTE(review): class renamed to match the `Graph()` call in the demo below;
    the obfuscated original named every method identically (shadowing each
    other) and used duplicate parameter names, which is a SyntaxError.
    """

    def __init__(self):
        # vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        """Print the adjacency structure, one vertex per line."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run DFS from every unvisited vertex, printing the visit order."""
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Visit `start_vertex`, then recurse into unvisited vertices."""
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    # Demo: build a small directed graph and run DFS on it.
    # Fixed: the original bound the graph to a placeholder name and then
    # used an unbound `g` for every call.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print('''DFS:''')
    g.dfs()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 399 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a flax checkpoint key tuple (and reshape its tensor) to the
    transformers layout.

    Renamed to match the `rename_base_flax_keys(...)` call in
    `shard_on_the_fly`; the original used the same name for both parameters,
    which is a SyntaxError.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer (the join is always truthy for a non-empty tuple)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, sub-key tuple,
    content for the tensorstore spec).

    Renamed to match the call in `shard_on_the_fly`; the original used the
    same name for all three parameters (SyntaxError). Parameter names are
    grounded in the real-name references the body already contained.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        # Make the stored path absolute relative to the checkpoint directory.
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename the keys of a weight block and torch.save it to `save_path`.

    Renamed to match the calls in `shard_on_the_fly`; the original used the
    same name for both parameters (SyntaxError).
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE(review): the obfuscation destroyed the key transform applied
        # here; "mlp." -> "ff." is taken from the upstream conversion script —
        # confirm against the original.
        new_current_block[k.replace("mlp.", "ff.")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    """Convert a T5X Switch checkpoint into sharded PyTorch weight files.

    Reads the tensorstore-backed checkpoint under `switch_checkpoint_path`,
    renames the keys to the transformers layout, and writes shards of at most
    `max_shard_size` bytes plus an index file into `dump_path`.

    Returns (metadata, index). Renamed (and parameters un-duplicated — the
    original signature was a SyntaxError) to match the call under the
    `__main__` guard below.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    # Group the flattened keys by their real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    # Fixed: the parser/args were bound to placeholder names, and the call
    # read `args.switch_tax_checkpoint_path`, which does not match the
    # `--switch_t5x_checkpoint_path` flag defined below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--switch_t5x_checkpoint_path''',
        default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
        type=str,
        required=False,
        help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
    )
    parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
    parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
        type=str,
        required=False,
        help='''Path to the output pytorch model.''',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def __lowerCAmelCase():
    """Manual sanity check: round-trip a Switch config and run one generation.

    NOTE(review): local variable names restored from their later real-name
    uses; the original passed undefined placeholder names to the tokenizer and
    model. Downloads models, so this is not exercised by automated tests.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = TaTokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 399 | 1 |
def lowercase_(num):
    """Return the multiplicative persistence of `num`: the number of times its
    digits must be multiplied together before a single digit remains.

    Raises ValueError for non-int or negative input. The original body
    referenced an undefined placeholder name everywhere, so it could not run.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def lowercase_(num):
    """Return the additive persistence of `num`: the number of times its digits
    must be summed before a single digit remains.

    Raises ValueError for non-int or negative input. The original body
    referenced an undefined placeholder name everywhere, so it could not run.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 340 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fixed: both module-level constants were assigned to the same name, so the
# archive map clobbered the logger that the config class below relies on.
logger = logging.get_logger(__name__)

# TODO Update this
# NOTE(review): map name taken from the upstream ESM module — confirm.
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase ( snake_case__):
    """Configuration class for ESM models.

    NOTE(review): parameter and attribute names restored from the right-hand
    sides of the original assignments (`= vocab_size`, `= hidden_size`, …);
    the obfuscated original gave every __init__ parameter the same name, which
    is a SyntaxError.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # pad/mask token ids are consumed by the base config class.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", False):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested EsmFoldConfig."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Sub-configuration describing the ESMFold head.

    Class renamed to match the `EsmFoldConfig()` / `EsmFoldConfig(**...)`
    references in the config class above. NOTE(review): field names restored
    from the upstream ESMFold config (the obfuscated original gave every
    field the same name) — confirm.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a ready TrunkConfig, a dict, or nothing at all.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested TrunkConfig."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class lowercase :
    """Configuration for the ESMFold trunk (folding blocks).

    NOTE(review): all fields are bound to the same scrambled name ``a__``
    (only the last survives as a dataclass field); the validation method
    below reads attributes such as ``max_recycles`` / ``sequence_state_dim``
    that these fields presumably carried before scrambling — confirm.
    """
    a__ : int = 48
    a__ : int = 1024
    a__ : int = 128
    a__ : int = 32
    a__ : int = 32
    a__ : int = 32
    a__ : float = 0
    a__ : float = 0
    a__ : bool = False
    a__ : int = 4
    a__ : Optional[int] = 128
    a__ : "StructureModuleConfig" = None
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        """Validate hyper-parameters and normalize the structure-module config.

        NOTE(review): the two "round multiple" checks take a value modulo
        itself, so they can never fire (the divisor was presumably the
        corresponding head width); ``sequence_num_heads`` /
        ``pairwise_num_heads`` are read below but the quotients are assigned
        to a scrambled name — confirm against upstream.
        """
        if self.structure_module is None:
            UpperCAmelCase_= StructureModuleConfig()
        elif isinstance(self.structure_module , __UpperCAmelCase ):
            UpperCAmelCase_= StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
                F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
                F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
        UpperCAmelCase_= self.sequence_state_dim // self.sequence_head_width
        UpperCAmelCase_= self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
        # Serialize to a plain dict, expanding the structure-module sub-config.
        # NOTE(review): ``output`` is never assigned (scrambled locals).
        UpperCAmelCase_= asdict(self )
        UpperCAmelCase_= self.structure_module.to_dict()
        return output
@dataclass
class lowercase :
    """Configuration for the ESMFold structure module (IPA head).

    NOTE(review): every field is bound to the same scrambled name ``a__``;
    as a dataclass only the final binding survives.  Confirm the intended
    field names against the upstream structure-module config.
    """
    a__ : int = 384
    a__ : int = 128
    a__ : int = 16
    a__ : int = 128
    a__ : int = 12
    a__ : int = 4
    a__ : int = 8
    a__ : float = 0.1
    a__ : int = 8
    a__ : int = 1
    a__ : int = 2
    a__ : int = 7
    a__ : int = 10
    a__ : float = 1e-8
    a__ : float = 1e5
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
        # Serialize all dataclass fields to a plain dict.
        return asdict(self )
def __a ( ) -> int:
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Layout: four special tokens, the 25 residue/letter symbols in canonical
    ESM order, the gap markers "." and "-", then "<null_1>" and "<mask>".
    """
    leading_specials = ("<cls>", "<pad>", "<eos>", "<unk>")
    residue_tokens = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing_specials = (".", "-", "<null_1>", "<mask>")
    return leading_specials + residue_tokens + trailing_specials
| 593 | 0 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are exempt from the stale-bot.
lowerCamelCase_ = [
    '''good first issue''',
    '''good second issue''',
    '''good difficult issue''',
    '''enhancement''',
    '''new pipeline/model''',
    '''new scheduler''',
    '''wip''',
]
# The stale-bot routine below looks this list up as ``LABELS_TO_EXEMPT``,
# which was otherwise undefined — expose the same list under that name too.
LABELS_TO_EXEMPT = lowerCamelCase_
def __magic_name__ ( ):
    '''Stale-bot pass over open issues in ``huggingface/diffusers``.

    For every open issue (newest comment considered first):
      * close it when the bot's stale notification has aged past 7 days,
      * re-open / un-label it when a human replied after the bot,
      * otherwise post the stale notification after 23 days of inactivity.

    Issues carrying an exempt label are never touched.  Requires the
    ``GITHUB_TOKEN`` environment variable.
    '''
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/diffusers""" )
    open_issues = repo.get_issues(state="""open""" )

    for issue in open_issues:
        # Newest first, so comments[0] is the most recent comment.
        comments = sorted(issue.get_comments() , key=lambda comment: comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in lowerCamelCase_ for label in issue.get_labels() )
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="""closed""" )
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="""open""" )
            issue.remove_from_labels("""stale""" )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in lowerCamelCase_ for label in issue.get_labels() )
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""" )
            issue.add_to_labels("""stale""" )
if __name__ == "__main__":
    # This module defines no ``main``; the stale-bot entry point above is
    # ``__magic_name__`` (scrambled name), so invoke that instead.
    __magic_name__()
| 86 |
from ..utils import DummyObject, requires_backends
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch""", """torchsde"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch""", """torchsde"""] )
| 86 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
# Map of pretrained LiLT checkpoints to their hosted config files.
# NOTE(review): this assignment rebinds ``_a`` and discards the logger bound
# just above — the two values presumably carried distinct names originally.
_a = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class __A ( PretrainedConfig ):
    '''Configuration for a LiLT (Language-Independent Layout Transformer) model.

    Standard BERT-style hyper-parameters plus the LiLT-specific layout
    fields (``channel_shrink_ratio`` and the 2-D position-embedding size).

    The original ``__init__`` bound every argument to one scrambled name
    (a SyntaxError) and its base class was an undefined name; the parameter
    list below is reconstructed from the attribute assignments in the body,
    and the base is the ``PretrainedConfig`` imported at the top of the file.
    '''

    lowerCAmelCase_ = """lilt"""

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1_0_2_4,
        **kwargs,
    ):
        '''All defaults follow the original signature's positional defaults.'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # LiLT-specific: shrink factor for the layout channel.
        self.channel_shrink_ratio = channel_shrink_ratio
        # LiLT-specific: size of the 2-D (layout) position embeddings.
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 481 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    '''Return indices ``[i, j]`` of two entries in sorted ``nums`` that sum
    to ``target``; return ``[]`` when no such pair exists.

    Classic two-pointer scan from both ends; ``nums`` must be sorted in
    non-decreasing order.  The original definition duplicated its parameter
    name (a SyntaxError) and bound every local to one scrambled name; the
    names are reconstructed from the in-body references.
    '''
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # need a larger sum -> advance the low pointer
        else:
            j -= 1  # need a smaller sum -> retreat the high pointer
    return []


# Preserve the module's original (scrambled) binding for any existing caller;
# the __main__ guard below calls ``two_pointer``.
lowerCAmelCase__ = two_pointer
if __name__ == "__main__":
    # Run the module's doctests, then smoke-check the solver.
    # NOTE(review): relies on a module-level ``two_pointer`` callable.
    import doctest

    doctest.testmod()
    print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 481 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ =logging.get_logger(__name__)
# Map of pretrained DPR checkpoints to their hosted config files.
# NOTE(review): this assignment rebinds ``lowerCAmelCase__`` and discards the
# logger bound just above — the two values had distinct names originally.
lowerCAmelCase__ ={
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}
class A__( PretrainedConfig ):
    '''Configuration for DPR (Dense Passage Retrieval) encoders/reader.

    BERT-style hyper-parameters plus ``projection_dim`` (size of an optional
    projection on top of the pooled output; 0 disables it).

    The original ``__init__`` bound every argument to one scrambled name
    (a SyntaxError) and inherited from an undefined name; the parameter list
    is reconstructed from the attribute assignments in the body, and the
    base is the ``PretrainedConfig`` imported at the top of the file.
    '''

    lowerCAmelCase = """dpr"""

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        """All defaults follow the original signature's positional defaults."""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Size of the optional projection over the pooled output (0 = none).
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 717 |
"""simple docstring"""
import math
# Problem constants (Project Euler 493): 7 colours with 10 balls each.
# The third assignment below already referenced ``BALLS_PER_COLOUR`` and
# ``NUM_COLOURS`` by name, but they were never bound (scrambled names).
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
# Preserve the original (repeatedly rebound) scrambled module binding.
lowerCAmelCase__ = NUM_BALLS
def _a ( UpperCAmelCase__ = 20 ) -> str:
    """Expected number of distinct colours among ``UpperCAmelCase__`` balls
    drawn without replacement from ``NUM_BALLS`` balls, ``BALLS_PER_COLOUR``
    of each of ``NUM_COLOURS`` colours (Project Euler 493).

    By linearity of expectation a colour contributes 1 unless every drawn
    ball avoids it, i.e. with probability C(N - per_colour, k) / C(N, k).
    Returns the expectation formatted to 9 decimal places.
    """
    # Bug fix: the first comb() was called as comb(k, k) == 1, which made the
    # "all balls avoid this colour" probability meaningless; the denominator
    # must be the total number of ways to draw k balls from all N.
    total = math.comb(NUM_BALLS , UpperCAmelCase__ )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"""{result:.9f}"""
if __name__ == "__main__":
    # ``solution`` is not defined in this module (the solver's name was
    # scrambled to ``_a``); call the solver directly.
    print(_a(20))
| 690 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( a__ , unittest.TestCase ):
    """Test-suite for the fast Bloom tokenizer.

    NOTE(review): the base class ``a__`` is undefined at module level —
    presumably the ``TokenizerTesterMixin`` imported above (otherwise
    unused).  The seven ``snake_case__`` class attributes repeatedly rebind
    one name (only the last survives) and several method locals are bound to
    ``lowerCAmelCase`` while later lines read other names (``tokenizer``,
    ``kwargs``) — identifiers appear scrambled; confirm against the original
    test module before relying on behavior described here.
    """
    snake_case__ = None
    snake_case__ = BloomTokenizerFast
    snake_case__ = BloomTokenizerFast
    snake_case__ = True
    snake_case__ = False
    snake_case__ = '''tokenizer_file'''
    snake_case__ = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def UpperCamelCase__ ( self ):
        """Fetch the reference Bloom tokenizer and cache it under tmpdirname."""
        super().setUp()
        lowerCAmelCase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )
    def UpperCamelCase__ ( self , **_snake_case ):
        """Build a tokenizer from the cached files, merging the special-token map."""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
    def UpperCamelCase__ ( self ):
        """Round-trip a tiny batch through encode/decode against known token ids."""
        lowerCAmelCase = self.get_rust_tokenizer()
        lowerCAmelCase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        lowerCAmelCase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        lowerCAmelCase = tokenizer.batch_encode_plus(_snake_case )['input_ids']
        self.assertListEqual(_snake_case , _snake_case )
        lowerCAmelCase = tokenizer.batch_decode(_snake_case )
        self.assertListEqual(_snake_case , _snake_case )
    def UpperCamelCase__ ( self , _snake_case=6 ):
        """Exercise padding-related paths for simple and pair inputs and check
        that requesting padding without a pad token raises ValueError."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                lowerCAmelCase = 'This is a simple input'
                lowerCAmelCase = ['This is a simple input 1', 'This is a simple input 2']
                lowerCAmelCase = ('This is a simple input', 'This is a pair')
                lowerCAmelCase = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(_snake_case , max_length=_snake_case )
                    tokenizer_r.encode_plus(_snake_case , max_length=_snake_case )
                    tokenizer_r.batch_encode_plus(_snake_case , max_length=_snake_case )
                    tokenizer_r.encode(_snake_case , max_length=_snake_case )
                    tokenizer_r.batch_encode_plus(_snake_case , max_length=_snake_case )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                lowerCAmelCase = None # Hotfixing padding = None
                self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='max_length' )
                # Simple input
                self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='max_length' )
                # Simple input
                self.assertRaises(
                    _snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='max_length' , )
                # Pair input
                self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='max_length' )
                # Pair input
                self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='max_length' )
                # Pair input
                self.assertRaises(
                    _snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='max_length' , )
    def UpperCamelCase__ ( self ):
        """Encode/decode one XNLI sample in every language and check identity."""
        lowerCAmelCase = self.get_rust_tokenizer()
        lowerCAmelCase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_snake_case )
        lowerCAmelCase = next(iter(_snake_case ) )['premise'] # pick up one data
        lowerCAmelCase = list(sample_data.values() )
        lowerCAmelCase = list(map(tokenizer.encode , _snake_case ) )
        lowerCAmelCase = [tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case ) for x in output_tokens]
        self.assertListEqual(_snake_case , _snake_case )
    def UpperCamelCase__ ( self ):
        """Sanity-check that the pretrained vocab file map is non-empty."""
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 4 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __UpperCamelCase ( a : Any ) ->Union[str, Any]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __UpperCamelCase ( a : Any , a : Tuple ) ->List[Any]:
snake_case = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
snake_case = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
snake_case = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
snake_case = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
snake_case = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
snake_case = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
snake_case = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
snake_case = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
snake_case = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
snake_case = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
snake_case = key.replace('''image_encoder.module''' , '''flava.image_model''' )
snake_case = key.replace('''text_encoder.module''' , '''flava.text_model''' )
snake_case = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
snake_case = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
snake_case = key.replace('''text_projection''' , '''flava.text_projection''' )
snake_case = key.replace('''image_projection''' , '''flava.image_projection''' )
snake_case = value.float()
for key, value in codebook_state_dict.items():
snake_case = value
return upgrade
@torch.no_grad()
def __UpperCamelCase ( a : Tuple , a : List[str] , a : Optional[Any] , a : Tuple=None ) ->Union[str, Any]:
    """Convert an original FLAVA checkpoint to a HF ``FlavaForPreTraining``
    model and save it.

    NOTE(review): the signature declares four parameters all named ``a`` —
    a SyntaxError — and every intermediate is discarded into the rebound
    local ``snake_case`` while later lines read other names (``hf_model``,
    ``upgrade_state_dict``, ``count_parameters``); identifiers were
    scrambled.  The intended flow appears to be: build/load a FlavaConfig,
    instantiate the model, convert the DALLE codebook, load the raw state
    dict (from disk or hub URL), upgrade its keys, load it into the model,
    sanity-check parameter sums, then ``save_pretrained`` — confirm against
    the upstream conversion script before fixing.
    """
    if config_path is not None:
        snake_case = FlavaConfig.from_pretrained(a )
    else:
        snake_case = FlavaConfig()
    snake_case = FlavaForPreTraining(a ).eval()
    snake_case = convert_dalle_checkpoint(a , a , save_checkpoint=a )
    if os.path.exists(a ):
        snake_case = torch.load(a , map_location='''cpu''' )
    else:
        snake_case = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )
    snake_case = upgrade_state_dict(a , a )
    hf_model.load_state_dict(a )
    snake_case = hf_model.state_dict()
    snake_case = count_parameters(a )
    snake_case = count_parameters(a ) + count_parameters(a )
    # Parameter sums must match between the source and converted models.
    assert torch.allclose(a , a , atol=1e-3 )
    hf_model.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry: parse checkpoint/codebook/config paths and run the
    # conversion.
    # NOTE(review): ``parser``, ``args`` and ``convert_flava_checkpoint`` are
    # all undefined as written — the parser/namespace are bound to the
    # rebound ``_lowercase`` and the conversion function above carries a
    # scrambled name; confirm intended wiring.
    _lowercase = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    _lowercase = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 342 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( UpperCAmelCase : Dict=None ):
"""simple docstring"""
if subparsers is not None:
__lowerCamelCase : List[str] = subparsers.add_parser("""test""" )
else:
__lowerCamelCase : str = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=UpperCAmelCase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def _UpperCAmelCase ( UpperCAmelCase : int ):
    """Run the accelerate sanity-check script via ``accelerate-launch``.

    ``UpperCAmelCase`` is the parsed argparse namespace; its ``config_file``
    (when given) is forwarded to the test script.  Prints a success message
    when the launched process exits cleanly.

    Bug fix: the body read undefined names (``args``, ``script_name``,
    ``test_args``) because every local was bound to one scrambled name.
    """
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )

    if UpperCAmelCase.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={UpperCAmelCase.config_file} {script_name}"""

    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""" )
def _UpperCAmelCase ( ):
    """Entry point: build the ``accelerate test`` parser, parse the CLI
    arguments, and run the test command.

    Bug fix: the parser local was bound to a scrambled name so
    ``parser.parse_args()`` referenced an undefined name, and the namespace
    was never passed on.  ``test_command_parser`` / ``test_command`` are the
    names this module already used for the two helpers above.
    """
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
    # ``main`` is not defined in this module; the entry point above is the
    # last (scrambled) binding of ``_UpperCAmelCase``.
    _UpperCAmelCase()
| 458 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCAmelCase ( tok , src_examples , tgt_examples , max_tokens=1_024 ):
    """Greedily concatenate adjacent (src, tgt) example pairs so each packed
    example stays within ``max_tokens`` tokens according to ``tok``.

    Returns ``(finished_src, finished_tgt)``, the packed source and target
    lists.  The original signature declared four parameters with the same
    name (a SyntaxError) and bound every local to one scrambled name; the
    names below are reconstructed from the in-body references.
    """
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]

    def is_too_big(text ):
        # One tokenizer call per candidate; compare token count to budget.
        return tok(text , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + """ """ + src
        cand_tgt = new_tgt + """ """ + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt

    # cleanup: flush the last partially-filled example
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt


# The directory-packing helper below calls this function as ``pack_examples``
# (previously an undefined name); expose it under that name too.
pack_examples = _UpperCAmelCase
def _UpperCAmelCase ( tok , data_dir , max_tokens , save_path ):
    """Pack the ``train`` split of ``data_dir`` with ``pack_examples`` and
    write it to ``save_path``; copy the ``val``/``test`` splits unchanged.

    The original signature declared four parameters with the same name
    (a SyntaxError) and bound every local to one scrambled name; the names
    below are reconstructed from the in-body references.
    """
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )

    for split in ["train"]:
        src_path , tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(packed_src ) )
        Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(packed_tgt ) )

    # Validation and test splits are copied through untouched.
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path , save_path / f"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / f"""{split}.target""" )


# The CLI wrapper below calls this function as ``pack_data_dir`` (previously
# an undefined name); expose it under that name too.
pack_data_dir = _UpperCAmelCase
def _UpperCAmelCase ( ):
    """CLI entry point: parse arguments, load the tokenizer, and pack the
    dataset directory.

    Bug fixes: the parser local was bound to a scrambled name so every
    ``parser.add_argument`` call referenced an undefined name, and each
    ``type=`` was the (undefined) scrambled name — restored to str/int from
    the arguments' evident value kinds.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("""--tok_name""" , type=str , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
    parser.add_argument("""--max_seq_len""" , type=int , default=128 )
    parser.add_argument("""--data_dir""" , type=str )
    parser.add_argument("""--save_path""" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )


# The __main__ guard below calls this function as ``packer_cli`` (previously
# an undefined name); expose it under that name too.
packer_cli = _UpperCAmelCase
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): ``packer_cli`` is not defined in this module as written —
    # the CLI function above carries a scrambled name; confirm the binding.
    packer_cli()
| 458 | 1 |
def snake_case (UpperCAmelCase__ ) -> int:
    """Return the number of divisors of ``UpperCAmelCase__`` via prime
    factorisation: d(n) = prod(multiplicity_i + 1) over prime factors.

    Bug fix: every local was bound to one scrambled name, so the loop read
    undefined ``n``/``i``/``multiplicity``/``n_divisors``; names restored
    from the in-body references.  (Return annotation corrected to ``int``.)
    """
    n = UpperCAmelCase__
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a single prime factor with multiplicity 1.
        n_divisors *= 2
    return n_divisors


# The triangle-number search below calls this function as ``count_divisors``
# (previously an undefined name); expose it under that name too.
count_divisors = snake_case
def snake_case () -> int:
    """Return the first triangular number with more than 500 divisors
    (Project Euler problem 12).

    Bug fix: both locals were bound to one scrambled name, so the loop read
    undefined ``i``/``t_num``; names restored from the in-body references.
    """
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i  # t_num is now the i-th triangular number
        if count_divisors(t_num ) > 5_0_0:
            break
    return t_num
if __name__ == "__main__":
    # ``solution`` is not defined in this module (names were scrambled); the
    # triangle-number search above is the last binding of ``snake_case``.
    print(snake_case())
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A_ (__a , __a , __a , __a , __a ):
    '''Convert an original LUKE checkpoint into a HF ``LukeModel``, verify a
    known entity-classification example, and save model + tokenizer.

    NOTE(review): the signature declares five parameters all named ``__a``
    — a SyntaxError — and nearly every intermediate is discarded into the
    rebound local ``A_`` while later lines read other names (``tokenizer``,
    ``config``, ``state_dict``, ``model`` etc.); identifiers were scrambled.
    Confirm the intended parameter/local names against the upstream LUKE
    conversion script before fixing.
    '''
    # Load configuration defined in the metadata file
    with open(__a ) as metadata_file:
        A_ = json.load(__a )
    A_ = LukeConfig(use_entity_aware_attention=__a , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    A_ = torch.load(__a , map_location="cpu" )
    # Load the entity vocab file
    A_ = load_entity_vocab(__a )
    A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    A_ = AddedToken("<ent>" , lstrip=__a , rstrip=__a )
    A_ = AddedToken("<ent2>" , lstrip=__a , rstrip=__a )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(__a )
    with open(os.path.join(__a , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(__a , __a )
    A_ = LukeTokenizer.from_pretrained(__a )
    # Initialize the embeddings of the special tokens
    A_ = state_dict["embeddings.word_embeddings.weight"]
    A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
    A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
    A_ = torch.cat([word_emb, ent_emb, enta_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            A_ = f'encoder.layer.{layer_index}.attention.self.'
            A_ = state_dict[prefix + matrix_name]
            A_ = state_dict[prefix + matrix_name]
            A_ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
    A_ = entity_emb[entity_vocab["[MASK]"]]
    A_ = LukeModel(config=__a ).eval()
    A_ , A_ = model.load_state_dict(__a , strict=__a )
    if not (len(__a ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(__a )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
        raise ValueError(
            "Unexpected keys"
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
    # Check outputs
    A_ = LukeTokenizer.from_pretrained(__a , task="entity_classification" )
    A_ = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    A_ = (39, 42)
    A_ = tokenizer(__a , entity_spans=[span] , add_prefix_space=__a , return_tensors="pt" )
    A_ = model(**__a )
    # Verify word hidden states
    if model_size == "large":
        A_ = torch.Size((1, 42, 1024) )
        A_ = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else: # base
        A_ = torch.Size((1, 42, 768) )
        A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        A_ = torch.Size((1, 1, 1024) )
        A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else: # base
        A_ = torch.Size((1, 1, 768) )
        A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    # NOTE(review): this condition uses ``!=`` where the word-hidden-state
    # check above uses ``==`` — the inverted comparison looks like a bug.
    if not (outputs.entity_last_hidden_state.shape != expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __a , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(__a ) )
    model.save_pretrained(__a )
def A_ (__a ):
    '''Load a LUKE entity vocabulary from the TSV file at path ``__a``.

    Each line has the form ``<title>\\t<count>``; the returned dict maps each
    entity title to its zero-based line index.

    Bug fixes: the loop enumerated the path string instead of the open file
    handle, and every local was bound to one scrambled name so the populated
    dict and the returned ``entity_vocab`` were different objects.
    '''
    entity_vocab = {}
    with open(__a , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    # CLI entry: parse checkpoint/metadata/vocab paths and run the
    # conversion.
    # NOTE(review): ``parser``, ``args`` and ``convert_luke_checkpoint`` are
    # all undefined as written — the parser/namespace are bound to the
    # rebound ``UpperCamelCase_`` and the conversion function above carries
    # a scrambled name; confirm intended wiring.
    UpperCamelCase_ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    UpperCamelCase_ : List[str] = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 115 | 0 |
def __lowerCAmelCase ( arr , required_sum ):
    """Return True iff some subset of ``arr`` (non-negative integers) sums
    to ``required_sum``.

    Classic O(len(arr) * required_sum) dynamic programming over a table
    where ``subset[i][j]`` means "some subset of the first i elements sums
    to j".  The original signature duplicated its parameter name (a
    SyntaxError) and every local was bound to one scrambled name; names
    restored from the in-body references.
    """
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False

    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                # Element too large for this target: inherit the answer.
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # Either skip the element or take it.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 701 |
lowerCamelCase : str = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCamelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase : Dict = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 290 | 0 |
def _lowercase( number : int , shift_amount : int ):
    """Logical left shift of ``number`` rendered as a binary string.

    Appends ``shift_amount`` zero bits to ``bin(number)``.  Both inputs must
    be non-negative; raises ValueError otherwise.

    Bug fix: the original duplicated its parameter name (a SyntaxError) and
    bound the intermediate to a scrambled name; names restored from the
    in-body references.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )

    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def _lowercase( number : int , shift_amount : int ):
    """Logical right shift of ``number`` rendered as a binary string.

    Drops the ``shift_amount`` lowest bits; returns "0b0" when every bit is
    shifted out.  Both inputs must be non-negative; raises ValueError
    otherwise.

    Bug fix: the original duplicated its parameter name (a SyntaxError) and
    bound intermediates to a scrambled name; names restored from the
    in-body references.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )

    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def _lowercase( number : int , shift_amount : int ):
    """Arithmetic (sign-extending) right shift of ``number`` rendered as a
    two's-complement binary string.

    Non-negative numbers get a leading '0' sign bit; negative numbers are
    converted to their two's-complement bit string with a leading '1'.
    Shifting replicates the sign bit into the vacated positions.

    Bug fix: the original duplicated its parameter name (a SyntaxError) and
    bound intermediates to scrambled names; names restored from the
    in-body structure.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )

    if shift_amount >= len(binary_number ):
        # Everything shifted out: result is all sign bits.
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 20 | """simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI Hub credentials and endpoints for the fixture suite below.
# Restored names: the obfuscated `__lowerCAmelCase` rebindings left
# `CI_HUB_USER`, `CI_HUB_ENDPOINT`, `CI_HUB_USER_TOKEN`, ... undefined even
# though the fixtures reference them.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# "(unknown)" in the original was a corrupted "{path}" placeholder.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{path}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Point huggingface_hub's file-download URL template at the CI hub.

    NOTE(review): obfuscated — `monkeypatch` is not bound in this scope; the
    parameter was presumably pytest's `monkeypatch` fixture, and the patched-in
    value a URL-template constant. Confirm against the original conftest.
    """
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCamelCase__ )
@pytest.fixture
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Redirect datasets' endpoint and hub URL config to the CI hub.

    NOTE(review): obfuscated — `monkeypatch` is not bound in this scope
    (presumably the parameter should be pytest's `monkeypatch` fixture, with
    CI endpoint/URL constants as the patched values). Verify before use.
    """
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCamelCase__ )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCamelCase__ )
@pytest.fixture
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Patch HfFolder's token path (so the real user token is never touched).

    NOTE(review): obfuscated — `monkeypatch` is not bound here; the parameter
    was presumably pytest's `monkeypatch` fixture. Verify before use.
    """
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCamelCase__ )
@pytest.fixture
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """Save the CI token for the duration of a test, then delete it.

    NOTE(review): obfuscated — the signature repeats the same parameter name,
    which is a SyntaxError; the two parameters were presumably a token value
    and the token-path fixture above. Restore before use.
    """
    HfFolder.save_token(lowerCamelCase__ )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( ):
    """Session-scoped HfApi client bound to the CI hub endpoint.

    NOTE(review): obfuscated — `lowerCamelCase__` is unbound here; the
    endpoint argument was presumably the `CI_HUB_ENDPOINT` constant.
    """
    return HfApi(endpoint=lowerCamelCase__ )
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Install the CI user token for the session, restoring any prior token.

    NOTE(review): obfuscated — the saved token is bound to `lowerCAmelCase__`
    but later read as `previous_token`, and `CI_HUB_USER_TOKEN` must exist at
    module level for the yield/save to make sense. Restore before use.
    """
    lowerCAmelCase__ = HfFolder.get_token()
    HfFolder.save_token(lowerCamelCase__ )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(lowerCamelCase__ )
@pytest.fixture
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Return a callable that deletes a dataset repo on the CI hub.

    NOTE(review): obfuscated — `hf_api` is unbound inside `_cleanup_repo`
    (presumably the fixture parameter), and the token argument should be the
    CI user token rather than the repo id. Restore before use.
    """
    def _cleanup_repo(lowerCamelCase__ ):
        # Best-effort deletion of a dataset repository by id.
        hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Context-manager factory: yield a repo id and clean the repo up after.

    NOTE(review): obfuscated — `repo_id` is unbound inside `_temporary_repo`
    (presumably its parameter) and `cleanup_repo` is unbound in the fixture
    (presumably the fixture parameter). Restore before use.
    """
    @contextmanager
    def _temporary_repo(lowerCamelCase__ ):
        try:
            yield repo_id
        finally:
            # Always delete the repo, even if the test body raised.
            cleanup_repo(lowerCamelCase__ )
    return _temporary_repo
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Create a private dataset repo with one text file; delete it afterwards.

    NOTE(review): obfuscated — duplicate parameter names (SyntaxError), and
    `repo_name` / `CI_HUB_USER` / `hf_api` are unbound. The parameters were
    presumably (hf_api, hf_token, text_file). Restore before use.
    """
    lowerCAmelCase__ = f"""repo_txt_data-{int(time.time() * 10e3 )}"""
    lowerCAmelCase__ = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ )
    hf_api.upload_file(
        token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data/text_data.txt""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Function-scoped alias of the session-scoped private text-data repo.

    NOTE(review): obfuscated — `hf_private_dataset_repo_txt_data_` is unbound;
    it was presumably one of this fixture's (renamed) parameters.
    """
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Create a private dataset repo holding a zipped text file; delete after.

    NOTE(review): obfuscated — duplicate parameter names (SyntaxError) and
    unbound `repo_name` / `CI_HUB_USER` / `hf_api` / `repo_id`; same repair as
    the text-data repo fixture is needed.
    """
    lowerCAmelCase__ = f"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
    lowerCAmelCase__ = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ )
    hf_api.upload_file(
        token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Function-scoped alias of the zipped-text private repo fixture.

    NOTE(review): obfuscated — `hf_private_dataset_repo_zipped_txt_data_` is
    unbound; presumably a renamed parameter of this fixture.
    """
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Create a private dataset repo holding a zipped image file; delete after.

    NOTE(review): obfuscated — duplicate parameter names (SyntaxError) and
    unbound `repo_name` / `CI_HUB_USER` / `hf_api` / `repo_id`; same repair as
    the other repo fixtures is needed.
    """
    lowerCAmelCase__ = f"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
    lowerCAmelCase__ = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ )
    hf_api.upload_file(
        token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Function-scoped alias of the zipped-image private repo fixture.

    NOTE(review): obfuscated — `hf_private_dataset_repo_zipped_img_data_` is
    unbound; presumably a renamed parameter of this fixture.
    """
    return hf_private_dataset_repo_zipped_img_data_
# --- corrupted dataset-concatenation marker removed (was: "| 644 | 0 |") ---
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of checkpoints to their hosted config files.
# NOTE(review): both bindings were obfuscated to `_a`, so the conventional
# names (`logger`, `EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`) are lost.
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
    'snap-research/efficientformer-l1-300': (
        'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    """Configuration for an EfficientFormer model.

    Restored from the obfuscated original: the `__init__` signature repeated
    the same parameter name (a SyntaxError), every attribute assignment was
    collapsed to `__lowerCamelCase`, and the base class was the undefined
    `__UpperCamelCase` (PretrainedConfig is imported at the top of the file).
    Parameter names are recovered from the assignment targets; the default
    values are kept exactly as in the source.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
# --- corrupted dataset-concatenation marker removed (was: "| 571 |") ---
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCamelCase__ ( _A: Tuple ):
    '''Factory wired into argparse `set_defaults(func=...)`: build the env command.

    NOTE(review): `EnvironmentCommand` is not defined under that name in this
    obfuscated file (the class below was renamed); verify the reference.
    '''
    return EnvironmentCommand()
def UpperCamelCase__ ( _A: Union[str, Any] ):
    '''Factory variant that forwards the parsed accelerate-config-file option.

    NOTE(review): the body reads `args` but the parameter is named `_A`
    (obfuscation); `EnvironmentCommand` is likewise not bound under that name
    in this file. Verify both references.
    '''
    return EnvironmentCommand(args.accelerate_config_file )
class UpperCamelCase_ ( __UpperCamelCase ):
    """`transformers-cli env` command: collect and print environment info.

    NOTE(review): heavily obfuscated. The base class `__UpperCamelCase`
    (presumably BaseTransformersCLICommand) is undefined, `__init__` repeats a
    parameter name (a SyntaxError), and the repeated `__lowerCamelCase`
    rebindings discard every probed version before the summary dict reads
    them back under their original names (`safetensors_version`, `pt_version`,
    ...). Restore from the original `transformers/commands/env.py`.
    """
    @staticmethod
    def lowerCamelCase_ ( UpperCAmelCase ):
        # Register the `env` sub-command and its CLI options on the parser.
        __lowerCamelCase = parser.add_parser("""env""" )
        download_parser.set_defaults(func=UpperCAmelCase )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=UpperCAmelCase , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=UpperCAmelCase )
    def __init__( self , UpperCAmelCase , *UpperCAmelCase ):
        # Remember the accelerate config path for the run() probe below.
        __lowerCamelCase = accelerate_config_file
    def lowerCamelCase_ ( self ):
        # Probe each optional dependency, degrading to "not installed"/"NA".
        __lowerCamelCase = """not installed"""
        if is_safetensors_available():
            import safetensors
            __lowerCamelCase = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""" ) is not None:
            import safetensors
            __lowerCamelCase = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        __lowerCamelCase = """not installed"""
        __lowerCamelCase = __lowerCamelCase = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            __lowerCamelCase = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(UpperCAmelCase ):
                __lowerCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
            __lowerCamelCase = (
                """\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(UpperCAmelCase , UpperCAmelCase )
                else f'''\t{accelerate_config}'''
            )
        __lowerCamelCase = """not installed"""
        __lowerCamelCase = """NA"""
        if is_torch_available():
            import torch
            __lowerCamelCase = torch.__version__
            __lowerCamelCase = torch.cuda.is_available()
        __lowerCamelCase = """not installed"""
        __lowerCamelCase = """NA"""
        if is_tf_available():
            import tensorflow as tf
            __lowerCamelCase = tf.__version__
            try:
                # deprecated in v2.1
                __lowerCamelCase = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                __lowerCamelCase = bool(tf.config.list_physical_devices("""GPU""" ) )
        __lowerCamelCase = """not installed"""
        __lowerCamelCase = """not installed"""
        __lowerCamelCase = """not installed"""
        __lowerCamelCase = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            __lowerCamelCase = flax.__version__
            __lowerCamelCase = jax.__version__
            __lowerCamelCase = jaxlib.__version__
            __lowerCamelCase = jax.lib.xla_bridge.get_backend().platform
        # Summary dict printed for bug reports; reads the names the probes
        # above were supposed to bind.
        __lowerCamelCase = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": f'''{safetensors_version}''',
            """Accelerate version""": f'''{accelerate_version}''',
            """Accelerate config""": f'''{accelerate_config_str}''',
            """PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
            """Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
            """Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
            """Jax version""": f'''{jax_version}''',
            """JaxLib version""": f'''{jaxlib_version}''',
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }
        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(UpperCAmelCase ) )
        return info
    @staticmethod
    def lowerCamelCase_ ( UpperCAmelCase ):
        # Render a dict as "- key: value" lines for the issue template.
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
# --- corrupted dataset-concatenation marker removed (was: "| 571 | 1 |") ---
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the GPT-NeoX checkpoint -> config-file map.
# NOTE(review): both bindings were obfuscated to `__lowerCAmelCase`, losing
# the conventional names (`logger`, `GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP`).
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """Configuration for a GPT-NeoX model.

    Restored from the obfuscated original: the `__init__` signature repeated
    `_lowercase` for every parameter (a SyntaxError), attribute assignments
    were collapsed to `snake_case_`, and the rope-scaling validator lost its
    name even though `__init__` still calls `self._rope_scaling_validation()`.
    Parameter names are recovered from the assignment targets; defaults are
    kept exactly as in the source.
    """

    model_type = '''gpt_neox'''

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        # Head dimension must be an integer.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )

    def _rope_scaling_validation(self) -> None:
        """Validate `rope_scaling`: None, or {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
# --- corrupted dataset-concatenation marker removed (was: "| 58 |") ---
'''simple docstring'''
import os
# Roman numeral values. Restored name: the constant was obfuscated to
# `SCREAMING_SNAKE_CASE__` while the parser below reads `SYMBOLS`.
SYMBOLS = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to its integer value.

    Uses the subtractive rule: a symbol smaller than its successor is
    subtracted (IV -> 4). Restored locals (`total_value`, `index`, ...) that
    the obfuscation had collapsed, and the name `parse_roman_numerals`
    expected by `solution()` below.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Render a positive integer as a minimal-length Roman numeral string.

    Handles each decimal place in turn (thousands, hundreds, tens, units),
    emitting the subtractive forms CM/CD, XC/XL and IX/IV where applicable.
    Restored locals (`numerals`, `m_count`, ...) and the name
    `generate_roman_numerals` expected by `solution()` below.
    """
    numerals = ''''''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting numerals minimally.

    Reads one Roman numeral per line from the data file next to this module,
    re-encodes each in minimal form, and sums the length savings. Restored
    locals and the `os.path.dirname(__file__)` base path (the obfuscated
    version took the dirname of the filename argument itself).
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"""{solution() = }""")
# --- corrupted dataset-concatenation marker removed (was: "| 538 | 0 |") ---
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE( __UpperCamelCase = None ) -> int:
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a__ : Optional[Any] = nums[0]
for i in range(1 , len(__UpperCamelCase ) ):
a__ : Optional[Any] = nums[i]
a__ : Union[str, Any] = max(__UpperCamelCase , ans + num , __UpperCamelCase )
return ans
if __name__ == "__main__":
    # Run doctests, then exercise the function on user-supplied numbers.
    import doctest

    doctest.testmod()
    # Try on a sample input from the user
    # NOTE(review): obfuscated — the two reads below were presumably bound to
    # `n` and `array`, which the slice `[:n]` and the final call still use.
    lowerCamelCase = int(input("""Enter number of elements : """).strip())
    lowerCamelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
    print(max_subsequence_sum(array))
# --- corrupted dataset-concatenation marker removed (was: "| 707 |") ---
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
# Tokenizer resource maps for MobileBERT.
# NOTE(review): every binding here was obfuscated to `lowerCamelCase`, so the
# names the class below reads (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION) are
# no longer defined.
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase = {
    """vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
    """tokenizer_file""": {
        """mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
    },
}
lowerCamelCase = {"""mobilebert-uncased""": 5_12}
lowerCamelCase = {}
class _a ( SCREAMING_SNAKE_CASE ):
    '''Fast MobileBERT tokenizer (backed by HuggingFace *tokenizers*).

    NOTE(review): obfuscated — the base `SCREAMING_SNAKE_CASE` (presumably
    PreTrainedTokenizerFast), the class-level resource constants, and the
    locals in `__init__` (`normalizer_state`, `normalizer_class`) do not line
    up with their uses; the class attributes also all rebind the single name
    `A`. Restore from the original `tokenization_mobilebert_fast.py`.
    '''
    A :Union[str, Any] = VOCAB_FILES_NAMES
    A :Tuple = PRETRAINED_VOCAB_FILES_MAP
    A :int = PRETRAINED_INIT_CONFIGURATION
    A :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A :Optional[Any] = MobileBertTokenizer
    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ):
        """Build the fast tokenizer and re-sync the normalizer options."""
        super().__init__(
            __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
        # Rebuild the backend normalizer if its saved state disagrees with
        # the requested lowercase / strip-accents / chinese-chars options.
        a__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case
            or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars
        ):
            a__ : Any = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) )
            a__ : Optional[int] = do_lower_case
            a__ : Optional[Any] = strip_accents
            a__ : List[Any] = tokenize_chinese_chars
            a__ : Optional[Any] = normalizer_class(**__UpperCAmelCase )
        a__ : Optional[Any] = do_lower_case
    def _A ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
        """Build model inputs: [CLS] A [SEP] (B [SEP]) with special tokens."""
        a__ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def _A ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        """Token-type ids: 0s for the first segment, 1s for the second."""
        a__ : List[str] = [self.sep_token_id]
        a__ : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _A ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        """Persist the vocabulary files via the backend model's save()."""
        a__ : Dict = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
        return tuple(__UpperCAmelCase )
# --- corrupted dataset-concatenation marker removed (was: "| 207 | 0 |") ---
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters that form exactly one right triangle.

    Enumerates primitive Pythagorean triples with Euclid's formula
    (m > n > 0, m - n odd, gcd(m, n) == 1; perimeter 2m(m + n)) and tallies
    every multiple of each primitive perimeter up to *limit*.

    Restored from the obfuscated original, which had three real bugs:
    ``defaultdict(limit)`` instead of ``defaultdict(int)`` (the factory must
    be callable), the inner range bounded by ``limit`` instead of
    ``euclid_m``, and the multiples range stepping by ``limit`` instead of
    the primitive perimeter.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n ranges over values of opposite parity to m, coprime with m.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"""{solution() = }""")
# --- corrupted dataset-concatenation marker removed (was: "| 37 |") ---
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """A univariate polynomial with coefficients in increasing-degree order.

    coefficients[i] is the coefficient of x**i. Restored from the obfuscated
    original: `__init__` never set `self.coefficients`/`self.degree` (the
    assignments were collapsed to the local `lowerCamelCase`), three methods
    shared the name `_lowercase`, and every method internally references the
    class as `Polynomial`.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store the degree and a defensive copy of the coefficient sequence.

        :raises ValueError: if len(coefficients) != degree + 1.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1." )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """Pointwise sum; the longer coefficient list is copied and extended."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        """Difference via addition with the negation (multiply by -1)."""
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        """Schoolbook O(n*m) convolution of the coefficient lists."""
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at x = substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest degree first; zero terms omitted."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative (degree drops by one)."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """Return the antiderivative with the given integration constant."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        """Equal iff degrees match and all coefficients match."""
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
# --- corrupted dataset-concatenation marker removed (was: "| 311 | 0 |") ---
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Test fixture path and MBart language-code token ids.
# NOTE(review): all four bindings were obfuscated to `__A`, so the names the
# tests read (SAMPLE_VOCAB, EN_CODE, RO_CODE) are no longer defined;
# 250004 / 250020 are the en_XX / ro_RO code ids.
__A : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A : Union[str, Any] = 2_5_0_0_0_4
__A : Optional[int] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( A , unittest.TestCase ):
    '''Unit tests for the slow/fast MBart tokenizers.

    NOTE(review): obfuscated — the mixin base `A` (presumably
    TokenizerTesterMixin), the test-method names, and the locals are broken:
    `SCREAMING_SNAKE_CASE` swallows every binding while the bodies read
    `tokenizer`, `tokens`, `ids`, `tokenizer_r`, etc., and `a` is used both as
    an argument and an expected value. Restore from the original
    `tests/models/mbart/test_tokenization_mbart.py`.
    '''
    a__ = MBartTokenizer
    a__ = MBartTokenizerFast
    a__ = True
    a__ = True
    def _UpperCAmelCase ( self : int ) -> Optional[int]:
        # setUp: build a tokenizer from the SentencePiece fixture and save it.
        super().setUp()
        # We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE = MBartTokenizer(a , keep_accents=a )
        tokenizer.save_pretrained(self.tmpdirname )
    def _UpperCAmelCase ( self : Tuple ) -> int:
        # Full tokenizer check: tokenize, ids round-trip, unk handling.
        SCREAMING_SNAKE_CASE = MBartTokenizer(a , keep_accents=a )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
    def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
        # Save/load parity between slow and fast tokenizers, in all three
        # serialization modes (default, legacy_format=True, legacy_format=False).
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(a , **a )
                SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(a , **a )
                SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(a )
                SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(a )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(a , a )
                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(a )
                SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(a )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(a , a ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(a )
                # Save tokenizer rust, legacy_format=True
                SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(a , legacy_format=a )
                SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(a )
                # Checks it save with the same files
                self.assertSequenceEqual(a , a )
                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(a )
                SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(a )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(a , a ) )
                shutil.rmtree(a )
                # Save tokenizer rust, legacy_format=False
                SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(a , legacy_format=a )
                SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(a )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(a )
                SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(a )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(a , a ) )
                shutil.rmtree(a )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
a__ = '''facebook/mbart-large-en-ro'''
a__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
a__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
a__ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def _UpperCAmelCase ( cls : Any ) -> int:
SCREAMING_SNAKE_CASE = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
SCREAMING_SNAKE_CASE = 1
return cls
def _UpperCAmelCase ( self : Any ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
def _UpperCAmelCase ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def _UpperCAmelCase ( self : int ) -> Dict:
self.assertIn(a , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
SCREAMING_SNAKE_CASE = self.tokenizer.decode(a , skip_special_tokens=a )
SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def _UpperCAmelCase ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , a )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def _UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_026, 250_001] )
def _UpperCAmelCase ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
SCREAMING_SNAKE_CASE = MBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def _UpperCAmelCase ( self : int ) -> int:
SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        # Padded/truncated batch has the expected (2, 14) shape, the source row
        # keeps the expected tokens, and prefix/suffix special tokens reset to
        # [] / [EOS, EN_CODE] after tokenization.
        # NOTE(review): `a` / `batch` are obfuscation artifacts — confirm
        # against the original MBart tokenizer test.
        SCREAMING_SNAKE_CASE = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        SCREAMING_SNAKE_CASE = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(a , a )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def _UpperCAmelCase ( self : Optional[int] ) -> Dict:
        # Source and target can be truncated to different max lengths (3 / 10).
        # NOTE(review): `a`, `batch` and `targets` are obfuscation artifacts —
        # presumably True flags and the two tokenizer outputs; confirm.
        SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="""pt""" )
        SCREAMING_SNAKE_CASE = self.tokenizer(
            text_target=self.tgt_text , padding=a , truncation=a , max_length=10 , return_tensors="""pt""" )
        SCREAMING_SNAKE_CASE = targets["""input_ids"""]
        SCREAMING_SNAKE_CASE = shift_tokens_right(a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def _UpperCAmelCase ( self : List[Any] ) -> Any:
        # _build_translation_inputs appends [EOS, src_lang] to the input and
        # forces the target language code as the generation BOS.
        SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        # NOTE(review): `a` is an obfuscation artifact — presumably the inputs
        # built above; confirm.
        self.assertEqual(
            nested_simplify(a ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[62, 3_034, 2, 250_004]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 250_001,
            } , )
| 710 |
from __future__ import annotations
class UpperCAmelCase_ :
    """XOR cipher: encrypts/decrypts text by XOR-ing each character with a key.

    XOR is its own inverse, so the same transformation both encrypts and
    decrypts.  A key of 0 means "no default key"; each call may pass its own
    key, and 1 is used as a last resort.

    Restored from an obfuscated version in which every method was named
    `_UpperCAmelCase` (so each definition clobbered the previous one) and
    several defs repeated the parameter name `a` — a SyntaxError.  Method
    names follow the usage examples in the comment block below the class.
    """

    def __init__(self, key: int = 0) -> None:
        # Default key used when a method is called with key == 0.
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Return *content* encrypted as a list of XOR-ed characters."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # Keep the key within a single byte.
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt` (XOR is symmetric)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Return *content* encrypted as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # Fold an arbitrarily large key into byte range.
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt *file* line by line into ``encrypt.out``; True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt *file* line by line into ``decrypt.out``; True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 450 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
# Checkpoint name and mixed-precision identifiers shared by the FSDP tests.
UpperCamelCase = "bert-base-cased"
UpperCamelCase = "fp16"
UpperCamelCase = "bf16"
# NOTE(review): the repeated assignments to `UpperCamelCase` are obfuscation
# artifacts (each clobbers the previous), and `FPaa` / `BFaa` are undefined —
# presumably these were distinct constants (model name, FP16, BF16, dtypes);
# confirm against the original accelerate test module.
UpperCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCAmelCase_ ( lowercase ):
    """FSDP plugin env-var configuration tests (single local process, CUDA).

    Each test mutates a copy of a minimal FSDP environment and checks that
    ``FullyShardedDataParallelPlugin`` picks up the matching torch setting.

    NOTE(review): obfuscation artifacts remain throughout — `lowerCamelCase__`
    (loop iterables / call arguments; presumably FSDP_SHARDING_STRATEGY,
    FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE, the model name, `dtypes`)
    and bare `UpperCamelCase__ :T = ...` assignments (presumably
    `self.dist_env`, env-var entries, `fsdp_plugin`, `accelerator`) are
    undefined or dead as written; confirm against the original accelerate
    test suite before trusting the behavior described here.
    """

    def __a ( self :List[str] ):
        # Minimal environment that turns FSDP on for one local process.
        super().setUp()
        UpperCamelCase__ :str = dict(
            ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )

    def __a ( self :Union[str, Any] ):
        # Each sharding-strategy name (1-based index) maps to ShardingStrategy.
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(lowerCamelCase__ ):
            UpperCamelCase__ :Optional[int] = self.dist_env.copy()
            UpperCamelCase__ :List[Any] = f"""{i + 1}"""
            UpperCamelCase__ :List[Any] = strategy
            with mockenv_context(**lowerCamelCase__ ):
                UpperCamelCase__ :Tuple = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )

    def __a ( self :Union[str, Any] ):
        # Backward-prefetch policy maps to BackwardPrefetch; NO_PREFETCH -> None.
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(lowerCamelCase__ ):
            UpperCamelCase__ :Optional[int] = self.dist_env.copy()
            UpperCamelCase__ :Optional[int] = prefetch_policy
            with mockenv_context(**lowerCamelCase__ ):
                UpperCamelCase__ :Dict = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )

    def __a ( self :Optional[Any] ):
        # State-dict type maps to StateDictType; FULL_STATE_DICT also enables
        # CPU offload and rank-0-only saving.
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(lowerCamelCase__ ):
            UpperCamelCase__ :Optional[int] = self.dist_env.copy()
            UpperCamelCase__ :Tuple = state_dict_type
            with mockenv_context(**lowerCamelCase__ ):
                UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )

    def __a ( self :List[str] ):
        # Auto-wrap policy: TRANSFORMER_BASED_WRAP needs a valid layer class,
        # SIZE_BASED_WRAP needs min_num_params > 0, NO_WRAP yields None.
        UpperCamelCase__ :List[Any] = AutoModel.from_pretrained(lowerCamelCase__ )
        for policy in FSDP_AUTO_WRAP_POLICY:
            UpperCamelCase__ :Optional[int] = self.dist_env.copy()
            UpperCamelCase__ :int = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                UpperCamelCase__ :Optional[Any] = """BertLayer"""
            elif policy == "SIZE_BASED_WRAP":
                UpperCamelCase__ :Union[str, Any] = """2000"""
            with mockenv_context(**lowerCamelCase__ ):
                UpperCamelCase__ :int = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )

        # An unknown transformer layer class must raise.
        UpperCamelCase__ :Optional[int] = self.dist_env.copy()
        UpperCamelCase__ :str = """TRANSFORMER_BASED_WRAP"""
        UpperCamelCase__ :Union[str, Any] = """T5Layer"""
        with mockenv_context(**lowerCamelCase__ ):
            UpperCamelCase__ :Any = FullyShardedDataParallelPlugin()
            with self.assertRaises(lowerCamelCase__ ) as cm:
                fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
            self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )

        # SIZE_BASED_WRAP with min_num_params == 0 disables wrapping.
        UpperCamelCase__ :Dict = self.dist_env.copy()
        UpperCamelCase__ :int = """SIZE_BASED_WRAP"""
        UpperCamelCase__ :Union[str, Any] = """0"""
        with mockenv_context(**lowerCamelCase__ ):
            UpperCamelCase__ :Optional[Any] = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )

    def __a ( self :Optional[Any] ):
        # fp16 -> float16 + ShardedGradScaler; bf16 -> bfloat16 and no scaler.
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            UpperCamelCase__ :Dict = self.dist_env.copy()
            UpperCamelCase__ :Dict = mp_dtype
            with mockenv_context(**lowerCamelCase__ ):
                UpperCamelCase__ :Optional[Any] = Accelerator()
                if mp_dtype == "fp16":
                    UpperCamelCase__ :Tuple = torch.floataa
                elif mp_dtype == "bf16":
                    UpperCamelCase__ :Tuple = torch.bfloataa
                UpperCamelCase__ :int = MixedPrecision(param_dtype=lowerCamelCase__ , reduce_dtype=lowerCamelCase__ , buffer_dtype=lowerCamelCase__ )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCamelCase__ )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , lowerCamelCase__ ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(lowerCamelCase__ )

    def __a ( self :Optional[Any] ):
        # FSDP_OFFLOAD_PARAMS env flag maps to CPUOffload(offload_params=flag).
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            UpperCamelCase__ :List[str] = self.dist_env.copy()
            UpperCamelCase__ :Dict = str(lowerCamelCase__ ).lower()
            with mockenv_context(**lowerCamelCase__ ):
                UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCAmelCase_ ( lowercase ):
    """End-to-end multi-GPU FSDP launcher tests (performance, checkpointing,
    peak memory), each run via an ``accelerate launch`` subprocess.

    NOTE(review): obfuscation artifacts remain — bare
    `UpperCamelCase__ :T = ...` assignments in setUp were presumably
    `self.performance_lower_bound`, `self.performance_configs`,
    `self.peak_memory_usage_upper_bound`, `self.n_train`, `self.n_val`,
    `self.test_scripts_folder`; loop iterables named `lowerCamelCase__` were
    presumably FSDP_SHARDING_STRATEGY and the launch command passed to
    `execute_subprocess_async` was presumably `cmd_config`.  Confirm against
    the original accelerate test suite before running.
    """

    def __a ( self :Dict ):
        # Thresholds, configs and script locations shared by the tests below.
        super().setUp()
        UpperCamelCase__ :str = 0.82
        UpperCamelCase__ :int = [
            """fsdp_shard_grad_op_transformer_based_wrap""",
            """fsdp_full_shard_transformer_based_wrap""",
        ]
        UpperCamelCase__ :int = {
            """multi_gpu_fp16""": 32_00,
            """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00,
            """fsdp_full_shard_transformer_based_wrap_fp16""": 19_00,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        UpperCamelCase__ :Optional[Any] = 1_60
        UpperCamelCase__ :List[str] = 1_60
        UpperCamelCase__ :Union[str, Any] = inspect.getfile(accelerate.test_utils )
        UpperCamelCase__ :Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )

    def __a ( self :str ):
        # Launch test_performance.py once per config; the accuracy lower bound
        # is asserted inside the subprocess.
        UpperCamelCase__ :int = os.path.join(self.test_scripts_folder , """test_performance.py""" )
        UpperCamelCase__ :List[str] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
        for config in self.performance_configs:
            UpperCamelCase__ :Optional[Any] = cmd.copy()
            for i, strategy in enumerate(lowerCamelCase__ ):
                if strategy.lower() in config:
                    cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
                    break

            if "fp32" in config:
                cmd_config.append("""--mixed_precision=no""" )
            else:
                cmd_config.append("""--mixed_precision=fp16""" )

            if "cpu_offload" in config:
                cmd_config.append("""--fsdp_offload_params=True""" )

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
                    break

            # Uses the loop variable leaked from the for above.
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("""--fsdp_min_num_params=2000""" )

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--performance_lower_bound={self.performance_lower_bound}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )

    def __a ( self :str ):
        # Run test_checkpointing.py for each state-dict type under FULL_SHARD,
        # then resume once from the saved epoch_0 checkpoint.
        UpperCamelCase__ :List[Any] = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
        UpperCamelCase__ :Any = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
            """--use_fsdp""",
            """--mixed_precision=fp16""",
            """--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
        ]
        for i, strategy in enumerate(lowerCamelCase__ ):
            UpperCamelCase__ :Optional[Any] = cmd.copy()
            cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
            if strategy != "FULL_SHARD":
                continue
            UpperCamelCase__ :Optional[int] = len(lowerCamelCase__ )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                # Reset to the base command before appending this type's flags.
                UpperCamelCase__ :Tuple = cmd_config[:state_dict_config_index]
                cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"""--output_dir={self.tmpdir}""",
                        """--partial_train_epoch=1""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )

                # Second run: drop the epoch flag and resume from checkpoint.
                UpperCamelCase__ :List[Any] = cmd_config[:-1]
                UpperCamelCase__ :Tuple = os.path.join(self.tmpdir , """epoch_0""" )
                cmd_config.extend(
                    [
                        f"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )

    def __a ( self :List[str] ):
        # Launch test_peak_memory_usage.py per spec, checking the memory bound.
        UpperCamelCase__ :List[str] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
        UpperCamelCase__ :Optional[int] = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            UpperCamelCase__ :Optional[int] = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["""--mixed_precision=fp16"""] )
            else:
                cmd_config.extend(["""--mixed_precision=no"""] )

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["""--use_fsdp"""] )
                for i, strategy in enumerate(lowerCamelCase__ ):
                    if strategy.lower() in spec:
                        cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("""--fsdp_offload_params=True""" )

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("""--fsdp_min_num_params=2000""" )

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    f"""--n_train={self.n_train}""",
                    f"""--n_val={self.n_val}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the Flax example-script directories importable so their main()
# functions can be driven directly from this test module.
__snake_case = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        """text-classification""",
        """language-modeling""",
        """summarization""",
        """token-classification""",
        """question-answering""",
    ]
]
# NOTE(review): `__snake_case` is an obfuscation artifact — `SRC_DIRS` below
# presumably refers to the list built above; as written SRC_DIRS is undefined.
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ):
    # Absorb the `-f` argument that notebook/pytest launchers inject and
    # return its value.
    UpperCamelCase :List[Any] = argparse.ArgumentParser()
    # NOTE(review): `parser` / `args` are obfuscation artifacts — presumably
    # the parser and namespace assigned above/below; as written these names
    # are undefined in this scope.
    parser.add_argument('''-f''' )
    UpperCamelCase :Dict = parser.parse_args()
    return args.f
def get_results(path, split="eval"):
    """Load the ``{split}_results.json`` file written by an example script.

    Args:
        path: output directory of the run.
        split: results-file prefix, e.g. ``"eval"`` or ``"test"``.

    Returns:
        The parsed JSON dict of metrics.

    Raises:
        ValueError: if the results file does not exist.
    """
    # Bug fix: the original obfuscated signature (`_A(SCREAMING_SNAKE_CASE__,
    # SCREAMING_SNAKE_CASE__="eval")`) left the names `path`/`split` used in
    # the f-strings undefined, and the in-file calls `get_results(tmp_dir)` /
    # `get_results(tmp_dir, split="test")` targeted a name and keyword this
    # def did not provide.  Restore the conventional name and parameters.
    result_file = os.path.join(path, f"{split}_results.json")
    if os.path.exists(result_file):
        with open(result_file, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {result_file}")
# Echo log records to stdout so the example scripts' output shows in tests.
__snake_case = logging.StreamHandler(sys.stdout)
# NOTE(review): `stream_handler` (and `logger` above) are obfuscation
# artifacts — presumably the handler/logger assigned to `__snake_case`.
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
    """Smoke tests that run the Flax example scripts end to end on tiny
    fixtures and check the metrics they write to ``{split}_results.json``.

    NOTE(review): obfuscation artifacts throughout — the CLI-arg lists are
    assigned as bare `UpperCamelCase :T = ...` (presumably `testargs`), and
    `patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ )`
    presumably patched ``sys.argv`` with those args; likewise `tmp_dir`,
    `result` and `epochs` refer to values assigned via the same artifacts.
    Confirm against the original transformers test before running.
    """

    def UpperCAmelCase ( self ) -> List[Any]:
        # run_flax_glue on the MRPC fixture must reach 75% eval accuracy.
        UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
        UpperCamelCase :Optional[Any] = F'''
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_flax_glue.main()
            UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )

    @slow
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        # run_clm_flax on the text fixture must reach perplexity < 100.
        UpperCamelCase :int = self.get_auto_remove_tmp_dir()
        UpperCamelCase :Optional[Any] = F'''
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_clm_flax.main()
            UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
            self.assertLess(result['''eval_perplexity'''] , 100 )

    @slow
    def UpperCAmelCase ( self ) -> Tuple:
        # run_summarization_flax on the XSum fixture must clear ROUGE bounds
        # on the test split.
        UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
        UpperCamelCase :Any = F'''
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_summarization_flax.main()
            UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )

    @slow
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        # run_mlm_flax on the text fixture must reach perplexity < 42.
        UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
        UpperCamelCase :List[str] = F'''
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_mlm_flax.main()
            UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
            self.assertLess(result['''eval_perplexity'''] , 42 )

    @slow
    def UpperCAmelCase ( self ) -> Optional[int]:
        # run_t5_mlm_flax must reach >= 42% eval accuracy.
        UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
        UpperCamelCase :int = F'''
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_ta_mlm_flax.main()
            UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )

    @slow
    def UpperCAmelCase ( self ) -> Tuple:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
        UpperCamelCase :int = self.get_auto_remove_tmp_dir()
        UpperCamelCase :Optional[int] = F'''
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_flax_ner.main()
            UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )

    @slow
    def UpperCAmelCase ( self ) -> Any:
        # run_qa on the SQuAD fixture must clear F1/exact-match bounds.
        UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
        UpperCamelCase :Dict = F'''
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            '''.split()
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            run_qa.main()
            UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 658 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __A ):
    """Unconditional image-generation pipeline using a UNet with the SDE-VE
    (variance-exploding) scheduler: alternating Langevin correction steps and
    reverse-SDE prediction steps.

    NOTE(review): obfuscation artifacts — every local is assigned as
    `A_ :T = ...` (originally distinct names such as `img_size`, `shape`,
    `model`, `sample`, `output`, `sample_mean`), so the single-name tuple
    assignment after `step_pred` and the later uses of `sample_mean` /
    `sample` are broken as written; confirm against diffusers'
    ScoreSdeVePipeline before relying on the behavior described here.
    """

    lowerCamelCase = 42
    lowerCamelCase = 42

    def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
        # Register the UNet and SDE-VE scheduler as pipeline components.
        super().__init__()
        self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )

    @torch.no_grad()
    def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = 2000 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample initial noise at the model's native resolution, scaled by the
        # scheduler's initial noise sigma.
        A_ : str = self.unet.config.sample_size
        A_ : int = (batch_size, 3, img_size, img_size)
        A_ : Dict = self.unet
        A_ : int = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase ) * self.scheduler.init_noise_sigma
        A_ : int = sample.to(self.device )

        self.scheduler.set_timesteps(_lowerCamelCase )
        self.scheduler.set_sigmas(_lowerCamelCase )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            A_ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step (Langevin dynamics at the current noise level)
            for _ in range(self.scheduler.config.correct_steps ):
                A_ : Tuple = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
                A_ : List[str] = self.scheduler.step_correct(_lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample

            # prediction step (one reverse-SDE step)
            A_ : str = model(_lowerCamelCase , _lowerCamelCase ).sample
            A_ : Union[str, Any] = self.scheduler.step_pred(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )

            A_ : str = output.prev_sample, output.prev_sample_mean

        # Final image uses the expected (mean) sample, clipped to [0, 1] and
        # converted to channels-last numpy.
        A_ : List[str] = sample_mean.clamp(0 , 1 )
        A_ : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A_ : Tuple = self.numpy_to_pil(_lowerCamelCase )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=_lowerCamelCase )
| 708 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Return a length-``num_processes`` float tensor unique to this process.

    Process ``i`` (with ``n = state.num_processes``) gets the values
    ``[i*n + 1, ..., i*n + n]``, so gathering across all processes yields
    ``1..n**2`` with no overlap.
    """
    # Bug fix: the def was obfuscated to `UpperCAmelCase` with an unused
    # parameter `a_` while the body read an undefined name `state` (NameError
    # at call time); in-file callers already use `create_tensor(state)`, so
    # restore that signature.
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    """Gathering each rank's unique tensor must yield 1..num_processes**2."""
    # Restored from obfuscation: the def was named `UpperCAmelCase`, `gather`
    # was called on the state instead of the tensor, and the assert read an
    # undefined `gathered_tensor`; `main()` below calls `test_gather`.
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    """gather_object on each rank's index must return [0, ..., n-1]."""
    # Restored from obfuscation: assertions referenced an undefined
    # `gathered_obj`; `main()` below calls `test_gather_object`.
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    """Broadcast from rank 0 must give every rank the rank-0 tensor 1..n."""
    # Restored from obfuscation: assertions referenced an undefined
    # `broadcasted_tensor`; `main()` below calls `test_broadcast`.
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    """Shorter tensors must be right-padded with 0 to the longest length."""
    # Restored from obfuscation: assertions referenced an undefined
    # `padded_tensor`; `main()` below calls `test_pad_across_processes`.
    # Rank 0 has one extra element, so every other rank needs one pad value.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    """reduce(..., "sum") across 2 ranks of [1,2] / [3,4] must give [4, 6]."""
    # Restored from obfuscation: the assert referenced undefined
    # `reduced_tensor` / `truth_tensor`; `main()` below calls `test_reduce_sum`.
    # Expected values are hard-coded for exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    """reduce(..., "mean") across 2 ranks of [1,2] / [3,4] must give [2, 3]."""
    # Restored from obfuscation: the assert referenced undefined
    # `reduced_tensor` / `truth_tensor`; `main()` below calls `test_reduce_mean`.
    # Expected values are hard-coded for exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    """Entry point for spawned workers (takes the worker index, unused).

    Restored from obfuscation (`UpperCAmelCase(a_)`); simply delegates to
    :func:`main`.  NOTE(review): the restored name follows the accelerate
    test-script convention — confirm against the original script.
    """
    main()
def main():
    """Run every distributed-operation smoke test, logging progress on rank 0.

    Restored from obfuscation: the def (like every helper in this script) was
    renamed `UpperCAmelCase`, so each definition clobbered the previous one
    and the `test_*` calls below targeted undefined names; the `__main__`
    guard already calls `main()`.
    """
    state = PartialState()
    state.print(f"State: {state}" )
    state.print("""testing gather""" )
    test_gather(state )
    state.print("""testing gather_object""" )
    test_gather_object(state )
    state.print("""testing broadcast""" )
    test_broadcast(state )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(state )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(state )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(state )


if __name__ == "__main__":
    main()
| 385 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__SCREAMING_SNAKE_CASE = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowerCAmelCase_ :
    """Image feature: stores images as a {"bytes": binary, "path": string}
    pyarrow struct and (optionally) decodes them to PIL images on access.

    NOTE(review): every attribute below is obfuscated to `_lowercase`, so only
    the last assignment survives at class-definition time; originally these
    were distinct fields (presumably decode / id / dtype / pa_type / _type) —
    confirm against datasets/features/image.py.
    """

    # Whether accessing a value decodes it into a PIL.Image.Image.
    _lowercase = True
    _lowercase = None
    # Automatically constructed
    _lowercase = "PIL.Image.Image"
    _lowercase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _lowercase = field(default='Image' , init=_lowerCAmelCase , repr=_lowerCAmelCase )
    def __call__( self ):
        # Calling the feature returns its pyarrow storage type.
        return self.pa_type
    def __lowerCamelCase ( self , __UpperCAmelCase ):
        """Encode an image-like value into the {"bytes", "path"} storage dict.

        Accepts a path string, raw bytes, a numpy array, a PIL image, or an
        already-encoded dict.  Requires Pillow.

        NOTE(review): the repeated `isinstance(__UpperCAmelCase ,
        __UpperCAmelCase )` checks below are obfuscation artifacts — they
        originally distinguished list, str and bytes inputs, and `value`
        refers to the (obfuscated) parameter; confirm against
        datasets.features.Image.encode_example.
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            # list input: coerce to a numpy array first
            SCREAMING_SNAKE_CASE_ : Dict =np.array(__UpperCAmelCase )

        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            # string input: keep only the path reference, no bytes stored yet
            return {"path": value, "bytes": None}
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            # raw bytes input
            return {"path": None, "bytes": value}
        elif isinstance(__UpperCAmelCase , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(__UpperCAmelCase )
        elif isinstance(__UpperCAmelCase , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(__UpperCAmelCase )
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
        """Decode a stored {"bytes", "path"} dict into a ``PIL.Image.Image``.

        Requires Pillow and ``decode=True``.  Local paths are opened directly;
        remote paths are streamed through ``xopen``, resolving a Hub auth
        token per repo when one is available.

        NOTE(review): `token_per_repo_id`, `value`, `path` and `bytes_` are
        obfuscation artifacts of the parameters / locals assigned via
        `SCREAMING_SNAKE_CASE_` — confirm against
        datasets.features.Image.decode_example.
        """
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            SCREAMING_SNAKE_CASE_ : List[str] ={}
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(__UpperCAmelCase ):
                    # local file: open directly from disk
                    SCREAMING_SNAKE_CASE_ : str =PIL.Image.open(__UpperCAmelCase )
                else:
                    # remote path of the form "root::inner": try to resolve a
                    # Hub token for the repo before streaming the bytes
                    SCREAMING_SNAKE_CASE_ : List[str] =path.split('::' )[-1]
                    try:
                        SCREAMING_SNAKE_CASE_ : Tuple =string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )['repo_id']
                        SCREAMING_SNAKE_CASE_ : List[Any] =token_per_repo_id.get(__UpperCAmelCase )
                    except ValueError:
                        # not a Hub dataset URL: no token to use
                        SCREAMING_SNAKE_CASE_ : Any =None
                    with xopen(__UpperCAmelCase , 'rb' , use_auth_token=__UpperCAmelCase ) as f:
                        SCREAMING_SNAKE_CASE_ : str =BytesIO(f.read() )
                    SCREAMING_SNAKE_CASE_ : Union[str, Any] =PIL.Image.open(bytes_ )
        else:
            SCREAMING_SNAKE_CASE_ : Any =PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image
    def __lowerCamelCase ( self ):
        """Flatten into plain ``Value`` columns when decoding is disabled;
        return the feature itself when decoding is on."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary' ),
                "path": Value('string' ),
            }
        )
def __lowerCamelCase ( self , __UpperCAmelCase ):
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE_ : int =pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE_ : int =pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ : Tuple =pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =storage.field('bytes' )
else:
SCREAMING_SNAKE_CASE_ : Dict =pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
SCREAMING_SNAKE_CASE_ : List[Any] =storage.field('path' )
else:
SCREAMING_SNAKE_CASE_ : Any =pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ : str =pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =pa.array(
[encode_np_array(np.array(__UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ : Tuple =pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def __lowerCamelCase ( self , __UpperCAmelCase ):
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase ):
with xopen(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] =f.read()
return bytes_
SCREAMING_SNAKE_CASE_ : Tuple =pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ : int =pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE_ : Dict =pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def list_image_compression_formats():
    """Return the list of PIL formats that support both reading and writing.

    The result is cached in the module-level ``_IMAGE_COMPRESSION_FORMATS``.
    Fix: the mangled version assigned the computed list to a throwaway local
    despite the ``global`` declaration, so the cache was never populated and
    the first call returned None.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) -> bytes:
    """Serialize a PIL image to bytes, keeping its native format when possible.

    Falls back to PNG for common modes, TIFF otherwise. Fix: the mangled body
    wrote to a throwaway local and called ``image.save`` with undefined names.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        image_format = image.format
    else:
        image_format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=image_format)
    return buffer.getvalue()
def encode_pil_image(image) -> dict:
    """Encode a PIL image to the storage dict.

    Prefers referencing the source file by path (no byte duplication) when the
    image was opened from a named file; otherwise serializes it to bytes.
    """
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array) -> dict:
    """Encode a numpy array as image bytes via Pillow.

    Multi-channel arrays must be (down)castable to ``|u1``; single-channel
    arrays are downcast within their dtype kind until a Pillow-supported dtype
    is found.

    Fixes vs. the mangled version: restored the undefined identifiers, and
    ``dest_dtype`` is now only set once a *valid* dtype is found in the
    downcast loop — previously the loop could exit leaving an invalid dtype,
    skipping the TypeError below.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(..., casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            candidate_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if candidate_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = candidate_dtype
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    """Encode a list of image-like objects (paths, arrays, PIL images) to dicts.

    Dispatches on the first non-null element's type; unrecognized types are
    returned unchanged. NOTE(review): identifiers restored — the mangled body
    called ``isinstance`` with the same undefined name for both arguments.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            # string objects are treated as file paths
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 220 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build a T5 PyTorch model from a config, load TF weights, and save it.

    Fix: the mangled definition declared three parameters with the same name
    (a SyntaxError) and was not callable under the name used at the bottom of
    the script.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON model config file.
        pytorch_dump_path: output directory for the PyTorch model.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser was assigned to a mangled name, so the
    # `parser.add_argument(...)` and `args.*` references below were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 132 | 0 |
def binary_exponentiation(a, n, mod):
    """Compute ``(a ** n) % mod`` by recursive binary exponentiation, O(log n).

    Fixes: restored the name the script calls below; the mangled definition
    had three identical parameter names (SyntaxError) and recursed via an
    undefined name. Also use integer division ``n // 2`` instead of ``n / 2``
    so exponents stay ints.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        # odd exponent: peel off one factor of a
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # even exponent: square the half-power
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
# (both prints compare modular division a/b (mod p) done via Fermat inverse
# against Python float division — restored names a, b, p, which the mangled
# version left undefined)
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 441 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-module bootstrap for the TAPAS subpackage.
# Fix: the import structure dict and the conditional model lists were assigned
# to throwaway mangled names, so `_import_structure` (used by _LazyModule
# below) was undefined and the lazy module was never installed.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # install the lazy module so attributes are imported on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 441 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds DPR configs/random inputs and runs shape checks for TF DPR models.

    NOTE(review): restored from mangled identifiers. The class is instantiated
    below as ``TFDPRModelTester``; parameter names/order follow the stored
    attribute assignments, which survived the mangling.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Create a DPRConfig plus random id/mask/label tensors for one pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        # DPR wraps a BERT encoder config; the mangled code hid the value of
        # is_decoder — an encoder-only tester uses False. TODO confirm.
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the context encoder with several input combos and check shape."""
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the question encoder with several input combos and check shape."""
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the reader and check start/end/relevance logit shapes."""
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the common test mixin interface."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF DPR encoders and reader.

    NOTE(review): restored from mangled identifiers — the class attributes were
    all named ``A`` (overwriting each other) and every method was named
    ``snake_case_`` (only the last survived). Mixin bases restored from the
    file's imports.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    # capabilities not supported/tested for DPR models
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # the context-encoder loop is intentionally run twice, mirroring the
        # original code
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the pretrained DPR question encoder."""

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 145 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    """Project Euler 144 helper: reflect a beam off the ellipse 4x^2 + y^2 = 100.

    Given the current reflection point and the incoming beam gradient, return
    ``(next_x, next_y, outgoing_gradient)`` — the next intersection with the
    ellipse and the reflected gradient.

    NOTE(review): restored the function name (called below as ``next_point``)
    and the ``isclose`` arguments — of the two quadratic roots, the one equal
    to the input x must be discarded.
    """
    # gradient of the normal at (x, y): for y^2 + 4x^2 = 100, slope = y / (4x)
    normal_gradient = point_y / 4 / point_x
    # double-angle terms for reflecting the incoming gradient about the normal
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point: keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections until the beam escapes through the gap at the top.

    The beam starts outside, first hits (first_x_coord, first_y_coord), and
    exits when it reaches -0.01 <= x <= 0.01 with y > 0.
    NOTE(review): restored the function name (called as ``solution`` below)
    and the three-variable unpack the mangling collapsed.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # incoming beam comes from (0.0, 10.1)
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
    # self-documenting f-string: prints "solution() = <value>"
    print(f"{solution() = }")
| 145 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase:
    # NOTE(review): identifiers in this class were machine-mangled. It is
    # instantiated below as ``TFEfficientFormerModelTester``; every __init__
    # parameter was renamed to the same placeholder ``SCREAMING_SNAKE_CASE``
    # (a duplicate-argument SyntaxError as written) while the assignment
    # right-hand sides (parent, batch_size, ...) preserve the original names.
    # The 23 defaults cannot be mapped unambiguously onto the 24 stored
    # attributes (two adjacent ``3`` defaults), so the code is left
    # byte-identical here; restore against upstream before use.
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict = 1_3 , SCREAMING_SNAKE_CASE : int = 6_4 , SCREAMING_SNAKE_CASE : Union[str, Any] = 2 , SCREAMING_SNAKE_CASE : Dict = 3 , SCREAMING_SNAKE_CASE : Union[str, Any] = 3 , SCREAMING_SNAKE_CASE : Dict = True , SCREAMING_SNAKE_CASE : Dict = True , SCREAMING_SNAKE_CASE : Dict = 1_2_8 , SCREAMING_SNAKE_CASE : Optional[Any]=[1_6, 3_2, 6_4, 1_2_8] , SCREAMING_SNAKE_CASE : Tuple = 7 , SCREAMING_SNAKE_CASE : Tuple = 4 , SCREAMING_SNAKE_CASE : List[str] = 3_7 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : Dict = 0.1 , SCREAMING_SNAKE_CASE : Any = 0.1 , SCREAMING_SNAKE_CASE : Dict = 1_0 , SCREAMING_SNAKE_CASE : Optional[int] = 0.02 , SCREAMING_SNAKE_CASE : Dict = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : Any = 1_2_8 , SCREAMING_SNAKE_CASE : Optional[Any] = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE : List[Any] = 2 , SCREAMING_SNAKE_CASE : Tuple = 2 , ) -> str:
        """Store the tester configuration (batch/image sizes, model dims, ...)."""
        # each ``__snake_case =`` line was originally ``self.<attr> = <attr>``
        __snake_case = parent
        __snake_case = batch_size
        __snake_case = image_size
        __snake_case = patch_size
        __snake_case = num_channels
        __snake_case = is_training
        __snake_case = use_labels
        __snake_case = hidden_size
        __snake_case = num_hidden_layers
        __snake_case = num_attention_heads
        __snake_case = intermediate_size
        __snake_case = hidden_act
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = type_sequence_label_size
        __snake_case = initializer_range
        __snake_case = encoder_stride
        __snake_case = num_attention_outputs
        __snake_case = embed_dim
        # sequence length is the embedding dim plus the class token
        __snake_case = embed_dim + 1
        __snake_case = resolution
        __snake_case = depths
        __snake_case = hidden_sizes
        __snake_case = dim
        __snake_case = mlp_expansion_ratio

    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
        """Build random pixel values (and labels when use_labels) plus a config."""
        __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __snake_case = None
        if self.use_labels:
            __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        __snake_case = self.get_config()

        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
        """Create an EfficientFormerConfig from the stored attributes.

        NOTE(review): keyword values mangled to ``_lowercase`` are undefined
        as written (originally literals such as False).
        """
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any ) -> Dict:
        """Forward the base model and check last_hidden_state shape."""
        __snake_case = TFEfficientFormerModel(config=_lowercase )
        __snake_case = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
        """Forward the classification head, incl. a greyscale (1-channel) run."""
        __snake_case = self.type_sequence_label_size
        __snake_case = TFEfficientFormerForImageClassification(_lowercase )
        __snake_case = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        __snake_case = 1
        __snake_case = TFEfficientFormerForImageClassification(_lowercase )

        __snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
        """Adapt prepare_config_and_inputs to the common-test dict interface."""
        __snake_case = self.prepare_config_and_inputs()
        # NOTE(review): originally a tuple unpack (config, pixel_values, labels)
        __snake_case = config_and_inputs
        __snake_case = {"""pixel_values""": pixel_values}

        return config, inputs_dict
@require_tf
class UpperCamelCase( __snake_case , __snake_case , unittest.TestCase ):
snake_case_ : List[Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
snake_case_ : Any = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
snake_case_ : List[Any] = False
snake_case_ : Optional[int] = False
snake_case_ : Union[str, Any] = False
snake_case_ : Tuple = False
snake_case_ : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple:
'''simple docstring'''
__snake_case = TFEfficientFormerModelTester(self )
__snake_case = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowercase )
__snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
__snake_case = model_class(_lowercase )
__snake_case = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , "encoder_seq_length" ):
__snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
__snake_case = seq_length * self.model_tester.chunk_length
else:
__snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__snake_case = getattr(self.model_tester , "seq_length" , _lowercase )
__snake_case = getattr(self.model_tester , "decoder_seq_length" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=False ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = getattr(self.model_tester , "seq_length" , _lowercase )
__snake_case = getattr(self.model_tester , "encoder_seq_length" , _lowercase )
__snake_case = getattr(self.model_tester , "key_length" , _lowercase )
__snake_case = getattr(self.model_tester , "chunk_length" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
__snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(_lowercase )
__snake_case = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(_lowercase )
__snake_case = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
    """Build each model as a functional Keras model with maximally flexible shapes.

    Fix: the config/inputs tuple was assigned to a single throwaway name and
    the loop body referenced the undefined ``_lowercase``; the intended
    ``config`` / ``key`` / ``model`` names are restored.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        # Prepare our model
        model = model_class(config)
        # These are maximally general inputs for the model, with multiple None dimensions
        # Hopefully this will catch any conditionals that fail for flexible shapes
        functional_inputs = {
            key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
            for key, val in model.input_signature.items()
            if key in model.dummy_inputs
        }
        outputs_dict = model(functional_inputs)
        self.assertTrue(outputs_dict is not None)
def _lowerCAmelCase ( ) -> "Image.Image":
    """Load the standard COCO fixture image used by the integration tests.

    Fix: the return annotation claimed ``int`` although the function returns
    a ``PIL.Image.Image`` object.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase( unittest.TestCase ):
    """Slow integration tests against real EfficientFormer checkpoints.

    Fixes: all three methods were named ``SCREAMING_SNAKE_CASE_`` so the
    later definitions shadowed the earlier ones (and the test bodies read
    ``self.default_image_processor``, which did not exist); every local also
    referenced the undefined ``_lowercase``.  The names the bodies actually
    use are restored.
    """

    @cached_property
    def default_image_processor( self : List[str] ) -> Optional[int]:
        # Only available when the vision extras are installed.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self : Optional[int] ) -> Dict:
        """Plain classification head: verify logit shape and leading values."""
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass (inference mode)
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher( self : Union[str, Any] ) -> str:
        """Teacher-distilled classification head: same checks as above."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass (inference mode)
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 719 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor

A : Any = logging.get_logger(__name__)


class UpperCamelCase( _a ):
    """Deprecated alias kept for backward compatibility; use BeitImageProcessor."""

    def __init__( self : Optional[int] , *args : Tuple , **kwargs : List[Any] ) -> None:
        # Fixes: the var-positional and var-keyword parameters shared the
        # name SCREAMING_SNAKE_CASE (a SyntaxError), and that tuple was being
        # passed as the warning *category*; FutureWarning is the intended
        # category for a deprecation notice.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 473 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    """Output of the VQ-VAE encode step.

    Fixes: the class previously inherited from its own, not-yet-defined
    mangled name (a NameError at import time), and the decoder later in this
    module constructs ``VQEncoderOutput(latents=...)`` — so the class name,
    the ``BaseOutput`` base (imported above) and the ``latents`` field name
    are restored to match that call site.
    """

    # Encoded latents, prior to vector quantization.
    latents: torch.FloatTensor
class _SCREAMING_SNAKE_CASE( ModelMixin , ConfigMixin ):
    """VQ-VAE model: Encoder -> (optional) VectorQuantizer -> Decoder.

    Fixes relative to the original block:
    - the class listed the preceding dataclass twice as its base; the mixin
      bases ``ModelMixin`` and ``ConfigMixin`` (imported at the top of the
      file) are used instead;
    - every ``__init__`` parameter shared the single name ``UpperCamelCase_``
      (a SyntaxError) while the body read ``vq_embed_dim``,
      ``latent_channels`` etc.; the real parameter names are restored;
    - ``nn.Convad`` does not exist — the 1x1 convolutions are ``nn.Conv2d``;
    - the three methods all shared one mangled name (shadowing each other and
      leaving no ``forward``); they are restored to ``encode`` / ``decode`` /
      ``forward``, which the ``forward`` body itself calls.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 2_56,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.1_8215,
        norm_type: str = "group",
    ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` to pre-quantization latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize (unless disabled) and decode latents back to sample space."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        hidden = self.post_quant_conv(quant)
        # "spatial" norm decoders consume the quantized latents as conditioning.
        dec = self.decoder(hidden, quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full encode -> quantize -> decode round trip."""
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
# Fix: all four module-level constants were bound to the same name
# ``UpperCamelCase_`` (each assignment overwriting the last), while the
# metric class below reads ``_CITATION``, ``_DESCRIPTION`` and
# ``_KWARGS_DESCRIPTION``.  The distinct names are restored; the string
# contents are unchanged.
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''

_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
    """COMET machine-translation metric backed by the ``unbabel-comet`` package.

    Fixes: the three hook methods all shared one mangled name (the later
    defs shadowed the earlier ones and none matched the ``datasets.Metric``
    API), and ``_compute``'s parameters all shared the name
    ``UpperCamelCase_`` (a SyntaxError) while its body read ``sources`` /
    ``predictions`` / ``references`` / ``gpus`` / ``progress_bar``.  The API
    hook names and the real parameter names are restored.
    """

    def _info(self):
        # Metric metadata plus the expected string-sequence input features.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://unbabel.github.io/COMET/html/index.html',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'sources': datasets.Value('string', id='sequence'),
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/Unbabel/COMET'],
            reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Download the checkpoint for the chosen configuration
        # (default: the WMT20 COMET direct-assessment model).
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score candidate translations against their sources and references."""
        if gpus is None:
            # Use one GPU when available, otherwise run on CPU.
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        # Re-shape the column dict into one row dict per example.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 209 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
    """Configuration holder used by the Donut image-processing tests.

    Fixes: every ``__init__`` parameter was named ``A_`` (a SyntaxError)
    while the body read ``parent`` / ``batch_size`` / ... / ``size``, and
    the assignments bound locals instead of instance attributes; the real
    parameter names and ``self.*`` targets are restored.  The dict-builder
    method is named ``prepare_image_processor_dict`` as its caller expects.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # NOTE(review): the mutable list defaults are kept for drop-in
        # compatibility, but they are shared across instances.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Unit tests for DonutImageProcessor over PIL, numpy and torch inputs.

    Fixes: the first base class was the undefined ``_SCREAMING_SNAKE_CASE``
    (the imported ``ImageProcessingSavingTestMixin`` is intended); the class
    attribute read by the test bodies is ``image_processing_class``; every
    method shared one mangled name (so unittest would discover nothing and
    later defs shadowed earlier ones); and the bodies referenced the
    undefined ``A_``.  Conventional ``test_*`` names and real locals are
    restored.

    NOTE(review): the tester class defined earlier in this file is itself
    (mis)named ``lowercase``; ``DonutImageProcessingTester`` is the name this
    ``setUp`` expects — confirm the tester definition carries that name.
    """

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        # Intentionally empty in the original test suite.
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
| 3 |
def A ( a , b ) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string.

    Fix: both parameters were named ``lowercase`` (a SyntaxError) while the
    body read ``a`` / ``b`` / ``a_binary`` / ``b_binary`` / ``max_len``; the
    names the body uses are restored.

    >>> A(25, 32)
    '0b111001'
    >>> A(0, 0)
    '0b0'

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # XOR digit-by-digit after left-padding both operands to equal width.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 3 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.