| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
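

# A minimal usage sketch for the config above (illustrative; in practice one
# would `from transformers import RoFormerConfig`, since this module uses
# relative imports). The printed values follow from the __init__ defaults.
if __name__ == "__main__":
    demo_config = RoFormerConfig(vocab_size=50000, num_hidden_layers=12)
    print(demo_config.model_type)      # "roformer"
    print(demo_config.embedding_size)  # falls back to hidden_size (768) when unset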
| 639 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """
    Counts the words in words.txt whose letter-value sum (A=1, ..., Z=26)
    is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    triangular_words = [
        word_value
        for word_value in (sum(ord(x) - 64 for x in word) for word in words)
        if word_value in TRIANGULAR_NUMBERS
    ]
    return len(triangular_words)


if __name__ == "__main__":
    print(solution())
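
# A worked example of the word-value rule above: letters map to alphabet
# positions via ord(x) - 64, so the (hypothetical, not from words.txt) word
# "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number (10 * 11 / 2).
if __name__ == "__main__":
    print(sum(ord(x) - 64 for x in "SKY"))                        # 55
    print(sum(ord(x) - 64 for x in "SKY") in TRIANGULAR_NUMBERS)  # True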
| 639 | 1 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro–Winkler similarity of two strings (0.0 to 1.0)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
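
# A reference check with the classic textbook pair: "martha"/"marhta" have
# 6 matching characters, 1 transposition, and a 3-character common prefix,
# giving jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444 and
# jaro_winkler = 0.9444 + 3 * 0.1 * (1 - 0.9444) ≈ 0.9611.
if __name__ == "__main__":
    print(jaro_winkler("martha", "marhta"))  # 0.9611111111111111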
| 639 |
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a '0b' string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
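
# A quick check of binary_xor against Python's built-in ^ operator:
# 25 = 0b11001 and 32 = 0b100000, which XOR (zero-padded) to 0b111001 (= 57).
if __name__ == "__main__":
    print(binary_xor(25, 32))                       # 0b111001
    print(int(binary_xor(25, 32), 2) == (25 ^ 32))  # True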
| 639 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __lowerCAmelCase :
def __init__( self : str , A : Optional[int] , A : List[str] , A : Any , A : str , A : Dict , A : Union[str, Any]=0.2 , A : int=0.2) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = bp_numa
_UpperCAmelCase = bp_numa
_UpperCAmelCase = bp_numa
_UpperCAmelCase = conva_get[:2]
_UpperCAmelCase = conva_get[2]
_UpperCAmelCase = size_pa
_UpperCAmelCase = rate_w
_UpperCAmelCase = rate_t
_UpperCAmelCase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_UpperCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_UpperCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_UpperCAmelCase = -2 * np.random.rand(self.conva[1]) + 1
_UpperCAmelCase = -2 * np.random.rand(self.num_bpa) + 1
_UpperCAmelCase = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self : List[Any] , A : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(A , 'wb') as f:
pickle.dump(A , A)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls : Tuple , A : Optional[Any]) -> Any:
"""simple docstring"""
with open(A , 'rb') as f:
_UpperCAmelCase = pickle.load(A) # noqa: S301
_UpperCAmelCase = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
_UpperCAmelCase = model_dic.get('size_pooling1')
_UpperCAmelCase = model_dic.get('num_bp1')
_UpperCAmelCase = model_dic.get('num_bp2')
_UpperCAmelCase = model_dic.get('num_bp3')
_UpperCAmelCase = model_dic.get('rate_weight')
_UpperCAmelCase = model_dic.get('rate_thre')
# create model instance
_UpperCAmelCase = CNN(A , A , A , A , A , A , A)
# modify model parameter
_UpperCAmelCase = model_dic.get('w_conv1')
_UpperCAmelCase = model_dic.get('wkj')
_UpperCAmelCase = model_dic.get('vji')
_UpperCAmelCase = model_dic.get('thre_conv1')
_UpperCAmelCase = model_dic.get('thre_bp2')
_UpperCAmelCase = model_dic.get('thre_bp3')
return conv_ins
def _lowerCamelCase ( self : Dict , A : int) -> Dict:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self : Tuple , A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return round(A , 3)
def _lowerCamelCase ( self : Optional[int] , A : Optional[Any] , A : Optional[Any] , A : List[str] , A : str , A : str) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = convs[0]
_UpperCAmelCase = convs[1]
_UpperCAmelCase = np.shape(A)[0]
# get the data slice of original image data, data_focus
_UpperCAmelCase = []
for i_focus in range(0 , size_data - size_conv + 1 , A):
for j_focus in range(0 , size_data - size_conv + 1 , A):
_UpperCAmelCase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(A)
# calculate the feature map of every single kernel, and saved as list of matrix
_UpperCAmelCase = []
_UpperCAmelCase = int((size_data - size_conv) / conv_step + 1)
for i_map in range(A):
_UpperCAmelCase = []
for i_focus in range(len(A)):
_UpperCAmelCase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(A))
_UpperCAmelCase = np.asmatrix(A).reshape(
A , A)
data_featuremap.append(A)
# expanding the data slice to one dimension
_UpperCAmelCase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(A))
_UpperCAmelCase = np.asarray(A)
return focus_list, data_featuremap
def _lowerCamelCase ( self : Tuple , A : Tuple , A : List[str] , A : int="average_pool") -> str:
"""simple docstring"""
_UpperCAmelCase = len(featuremaps[0])
_UpperCAmelCase = int(size_map / size_pooling)
_UpperCAmelCase = []
for i_map in range(len(A)):
_UpperCAmelCase = featuremaps[i_map]
_UpperCAmelCase = []
for i_focus in range(0 , A , A):
for j_focus in range(0 , A , A):
_UpperCAmelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(A))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(A))
_UpperCAmelCase = np.asmatrix(A).reshape(A , A)
featuremap_pooled.append(A)
return featuremap_pooled
def _lowerCamelCase ( self : List[str] , A : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = []
for i in range(len(A)):
_UpperCAmelCase = np.shape(data[i])
_UpperCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1])
_UpperCAmelCase = data_listed.getA().tolist()[0]
data_expanded.extend(A)
_UpperCAmelCase = np.asarray(A)
return data_expanded
def _lowerCamelCase ( self : Optional[int] , A : Tuple) -> Any:
"""simple docstring"""
_UpperCAmelCase = np.asarray(A)
_UpperCAmelCase = np.shape(A)
_UpperCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self : int , A : Optional[int] , A : Optional[Any] , A : int , A : Any , A : Any) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = 0
for i_map in range(A):
_UpperCAmelCase = np.ones((size_map, size_map))
for i in range(0 , A , A):
for j in range(0 , A , A):
_UpperCAmelCase = pd_pool[
i_pool
]
_UpperCAmelCase = i_pool + 1
_UpperCAmelCase = np.multiply(
A , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(A)
return pd_all
def _lowerCamelCase ( self : Dict , A : List[Any] , A : List[str] , A : Dict , A : Dict , A : List[str] , A : Any=bool) -> Any:
"""simple docstring"""
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(A)))
print((' - - Shape: Teach_Data ', np.shape(A)))
_UpperCAmelCase = 0
_UpperCAmelCase = []
_UpperCAmelCase = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
_UpperCAmelCase = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(A)):
# print('------------Learning Image: %d--------------'%p)
_UpperCAmelCase = np.asmatrix(datas_train[p])
_UpperCAmelCase = np.asarray(datas_teach[p])
_UpperCAmelCase , _UpperCAmelCase = self.convolute(
A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCAmelCase = self.pooling(A , self.size_poolinga)
_UpperCAmelCase = np.shape(A)
_UpperCAmelCase = self._expand(A)
_UpperCAmelCase = data_bp_input
_UpperCAmelCase = np.dot(A , self.vji.T) - self.thre_bpa
_UpperCAmelCase = self.sig(A)
_UpperCAmelCase = np.dot(A , self.wkj.T) - self.thre_bpa
_UpperCAmelCase = self.sig(A)
# --------------Model Learning ------------------------
# calculate error and gradient---------------
_UpperCAmelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(A , (1 - bp_outa)))
_UpperCAmelCase = np.multiply(
np.dot(A , self.wkj) , np.multiply(A , (1 - bp_outa)))
_UpperCAmelCase = np.dot(A , self.vji)
_UpperCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
_UpperCAmelCase = pd_conva_pooled.T.getA().tolist()
_UpperCAmelCase = self._calculate_gradient_from_pool(
A , A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_UpperCAmelCase = self._expand_mat(pd_conva_all[k_conv])
_UpperCAmelCase = self.rate_weight * np.dot(A , A)
_UpperCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_UpperCAmelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_UpperCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_UpperCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_UpperCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
_UpperCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_UpperCAmelCase = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_UpperCAmelCase = rp + 1
_UpperCAmelCase = error_count / patterns
all_mse.append(A)
def draw_error():
_UpperCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(A , '+-')
plt.plot(A , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(A , alpha=0.5)
plt.show()
print('------------------Training Complete---------------------')
print((' - - Training epoch: ', rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self : Dict , A : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(A)))
for p in range(len(A)):
_UpperCAmelCase = np.asmatrix(datas_test[p])
_UpperCAmelCase , _UpperCAmelCase = self.convolute(
A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCAmelCase = self.pooling(A , self.size_poolinga)
_UpperCAmelCase = self._expand(A)
_UpperCAmelCase = data_bp_input
_UpperCAmelCase = bp_outa * self.vji.T - self.thre_bpa
_UpperCAmelCase = self.sig(A)
_UpperCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
_UpperCAmelCase = self.sig(A)
produce_out.extend(bp_outa.getA().tolist())
_UpperCAmelCase = [list(map(self.do_round , A)) for each in produce_out]
return np.asarray(A)
def _lowerCamelCase ( self : Tuple , A : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = np.asmatrix(A)
_UpperCAmelCase , _UpperCAmelCase = self.convolute(
A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCAmelCase = self.pooling(A , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 639 |
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """
    A string can be rearranged into a palindrome iff at most one character
    has an odd count (case and spaces are ignored).
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """Benchmark both implementations on the same input string."""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 639 | 1 |
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
            shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        # stop once the two frontiers meet: no shorter path can remain
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
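
# With the sample graphs above, the shortest E -> F distance is 3 via
# E -> G -> F (cost 2 + 1); the all-forward route E -> B -> C -> D -> F
# would cost 4.
if __name__ == "__main__":
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3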
| 639 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 639 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPFeatureExtractor"]
UpperCAmelCase__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # An XLNet sequence has the format ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
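

# A short usage sketch, assuming `transformers` is installed and the
# "xlnet-base-cased" checkpoint is reachable; it shows XLNet's convention of
# appending <sep> and <cls> at the end of the sequence rather than the front.
if __name__ == "__main__":
    tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tokenizer.build_inputs_with_special_tokens([10, 11, 12])
    print(ids[-2:] == [tokenizer.sep_token_id, tokenizer.cls_token_id])  # True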
| 639 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : List[str]=13 , A : List[str]=7 , A : List[Any]=True , A : List[Any]=True , A : int=True , A : Union[str, Any]=True , A : Dict=99 , A : int=16 , A : Optional[int]=36 , A : List[Any]=6 , A : int=6 , A : Optional[int]=6 , A : str=37 , A : int="gelu" , A : int=0.1 , A : List[Any]=0.1 , A : str=5_12 , A : str=16 , A : List[Any]=2 , A : Optional[int]=0.0_2 , A : Any=3 , A : List[Any]=4 , A : Optional[int]=None , ) -> str:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_hidden_groups
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowerCamelCase ( self : Optional[int] , A : Optional[int] , A : str , A : int , A : Tuple , A : Union[str, Any] , A : List[Any] , A : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = AlbertModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A)
_UpperCAmelCase = model(A , token_type_ids=A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCamelCase ( self : List[Any] , A : List[str] , A : Any , A : Tuple , A : List[Any] , A : int , A : List[str] , A : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = AlbertForPreTraining(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , labels=A , sentence_order_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def _lowerCamelCase ( self : Optional[Any] , A : Any , A : Any , A : Dict , A : Optional[Any] , A : int , A : int , A : Dict) -> Any:
"""simple docstring"""
_UpperCAmelCase = AlbertForMaskedLM(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Tuple , A : List[str] , A : List[Any] , A : Tuple , A : Any , A : str , A : List[Any] , A : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AlbertForQuestionAnswering(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : List[str] , A : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : Any , A : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForSequenceClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any] , A : Optional[Any] , A : int , A : Tuple , A : Any , A : Union[str, Any] , A : List[str]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForTokenClassification(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : Optional[int] , A : Union[str, Any] , A : Optional[int] , A : Tuple , A : List[str] , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = AlbertForMultipleChoice(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any] , A : Optional[int] , A : Tuple=False) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = super()._prepare_for_class(A , A , return_labels=A)
if return_labels:
if model_class in get_values(A):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A)
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A)
return inputs_dict
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = AlbertModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A)
def _lowerCamelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A)
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*A)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AlbertModel.from_pretrained(A)
self.assertIsNotNone(A)
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = AlbertModel.from_pretrained('albert-base-v2')
_UpperCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
_UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCAmelCase = model(A , attention_mask=A)[0]
_UpperCAmelCase = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , A)
_UpperCAmelCase = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4))
| 639 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __lowerCAmelCase ( A ):
UpperCamelCase = 42
class __lowerCAmelCase ( A , A ):
@register_to_config
def __init__( self : Optional[Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 3 , A : int = 32 , A : int = 2_56 , A : int = 32 , A : Optional[int] = None , A : float = 0.1_8_2_1_5 , A : str = "group" , ) -> Dict:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
_UpperCAmelCase = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
_UpperCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
_UpperCAmelCase = nn.Convad(A , A , 1)
_UpperCAmelCase = VectorQuantizer(A , A , beta=0.2_5 , remap=A , sane_index_shape=A)
_UpperCAmelCase = nn.Convad(A , A , 1)
# pass init params to Decoder
_UpperCAmelCase = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , norm_type=A , )
@apply_forward_hook
def _lowerCamelCase ( self : str , A : torch.FloatTensor , A : bool = True) -> VQEncoderOutput:
"""simple docstring"""
_UpperCAmelCase = self.encoder(A)
_UpperCAmelCase = self.quant_conv(A)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=A)
@apply_forward_hook
def _lowerCamelCase ( self : Optional[int] , A : torch.FloatTensor , A : bool = False , A : bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if not force_not_quantize:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.quantize(A)
else:
_UpperCAmelCase = h
_UpperCAmelCase = self.post_quant_conv(A)
_UpperCAmelCase = self.decoder(A , quant if self.config.norm_type == 'spatial' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=A)
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_UpperCAmelCase = sample
_UpperCAmelCase = self.encode(A).latents
_UpperCAmelCase = self.decode(A).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A)
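# --- Illustrative usage sketch (added; not part of the original module). The
# class above corresponds to diffusers' VQModel, but identifiers in this dump
# are mangled, so the names below are assumptions. A round trip with the
# default config looks roughly like:
#
#     model = VQModel()                               # hypothetical upstream name
#     sample = torch.randn(1, 3, 256, 256)            # (batch, channels, height, width)
#     latents = model.encode(sample).latents          # continuous latents before quantization
#     reconstruction = model.decode(latents).sample   # quantize, post-quant conv, decode
#
# The forward pass above chains exactly these two calls.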
| 639
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
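# Illustrative note (added): for a toy record such as
# {"content": "def add(a, b):\n    return a + b\n"} the combined step above
# returns one flat dict of per-example statistics, roughly
#
#     {"hash": "<md5 hexdigest>", "line_mean": 15.0, "line_max": 16,
#      "alpha_frac": 0.53, "ratio": 3.4, "autogenerated": False,
#      "config_or_test": False, "has_no_keywords": False,
#      "has_few_assignments": True}
#
# (the "ratio" value is hypothetical, since it depends on the tokenizer).
# These keys are exactly what the filtering heuristic below consumes.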
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 1
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase__ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ = "ResNetConfig"
# Base docstring
UpperCAmelCase__ = "microsoft/resnet-50"
UpperCAmelCase__ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase__ = "microsoft/resnet-50"
UpperCAmelCase__ = "tiger cat"
UpperCAmelCase__ = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
def __init__( self : List[Any] , A : int , A : int , A : int = 3 , A : int = 1 , A : str = "relu") -> List[str]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Conv2d(
A , A , kernel_size=A , stride=A , padding=kernel_size // 2 , bias=A)
_UpperCAmelCase = nn.BatchNorm2d(A)
_UpperCAmelCase = ACT2FN[activation] if activation is not None else nn.Identity()
def _lowerCamelCase ( self : Optional[int] , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = self.convolution(A)
_UpperCAmelCase = self.normalization(A)
_UpperCAmelCase = self.activation(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , A : ResNetConfig) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
_UpperCAmelCase = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1)
_UpperCAmelCase = config.num_channels
def _lowerCamelCase ( self : str , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
_UpperCAmelCase = self.embedder(A)
_UpperCAmelCase = self.pooler(A)
return embedding
class __lowerCAmelCase ( nn.Module ):
def __init__( self : int , A : int , A : int , A : int = 2) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Conv2d(A , A , kernel_size=1 , stride=A , bias=A)
_UpperCAmelCase = nn.BatchNorm2d(A)
def _lowerCamelCase ( self : Optional[int] , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = self.convolution(A)
_UpperCAmelCase = self.normalization(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , A : int , A : int , A : int = 1 , A : str = "relu") -> List[str]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = (
ResNetShortCut(A , A , stride=A) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
ResNetConvLayer(A , A , stride=A) , ResNetConvLayer(A , A , activation=A) , )
_UpperCAmelCase = ACT2FN[activation]
def _lowerCamelCase ( self : Tuple , A : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(A)
_UpperCAmelCase = self.shortcut(A)
hidden_state += residual
_UpperCAmelCase = self.activation(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : str , A : int , A : int , A : int = 1 , A : str = "relu" , A : int = 4) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = out_channels // reduction
_UpperCAmelCase = (
ResNetShortCut(A , A , stride=A) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
ResNetConvLayer(A , A , kernel_size=1) , ResNetConvLayer(A , A , stride=A) , ResNetConvLayer(A , A , kernel_size=1 , activation=A) , )
_UpperCAmelCase = ACT2FN[activation]
def _lowerCamelCase ( self : List[Any] , A : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(A)
_UpperCAmelCase = self.shortcut(A)
hidden_state += residual
_UpperCAmelCase = self.activation(A)
return hidden_state
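# Illustrative note (added): both residual blocks above compute
# activation(layer(x) + shortcut(x)). The bottleneck variant first squeezes
# the channel count by `reduction` (default 4), so a 256-channel input runs
# its 3x3 convolution at 64 channels before a 1x1 convolution expands it
# back (the standard ResNet-50 trick for keeping the 3x3 convs cheap).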
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , A : ResNetConfig , A : int , A : int , A : int = 2 , A : int = 2 , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_UpperCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(A , A , stride=A , activation=config.hidden_act) , *[layer(A , A , activation=config.hidden_act) for _ in range(depth - 1)] , )
def _lowerCamelCase ( self : str , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = input
for layer in self.layers:
_UpperCAmelCase = layer(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Any , A : ResNetConfig) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
_UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(A , config.depths[1:]):
self.stages.append(ResNetStage(A , A , A , depth=A))
def _lowerCamelCase ( self : Dict , A : Tensor , A : bool = False , A : bool = True) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
_UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
_UpperCAmelCase = stage_module(A)
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=A , hidden_states=A , )
class __lowerCAmelCase ( A ):
UpperCamelCase = ResNetConfig
UpperCamelCase = '''resnet'''
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def _lowerCamelCase ( self : Any , A : int) -> str:
"""simple docstring"""
if isinstance(A , nn.Conv2d):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(A , (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _lowerCamelCase ( self : List[str] , A : List[Any] , A : Union[str, Any]=False) -> str:
"""simple docstring"""
if isinstance(A , A):
_UpperCAmelCase = value
UpperCAmelCase__ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , A , )
class __lowerCAmelCase ( A ):
def __init__( self : List[Any] , A : Any) -> str:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = config
_UpperCAmelCase = ResNetEmbeddings(A)
_UpperCAmelCase = ResNetEncoder(A)
_UpperCAmelCase = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self : Optional[int] , A : Tensor , A : Optional[bool] = None , A : Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.embedder(A)
_UpperCAmelCase = self.encoder(
A , output_hidden_states=A , return_dict=A)
_UpperCAmelCase = encoder_outputs[0]
_UpperCAmelCase = self.pooler(A)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A , pooler_output=A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , A , )
class __lowerCAmelCase ( A ):
def __init__( self : Any , A : Any) -> int:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = ResNetModel(A)
# classification head
_UpperCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self : Any , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.resnet(A , output_hidden_states=A , return_dict=A)
_UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase = self.classifier(A)
_UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase = 'single_label_classification'
else:
_UpperCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
_UpperCAmelCase = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze())
else:
_UpperCAmelCase = loss_fct(A , A)
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase = CrossEntropyLoss()
_UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase = BCEWithLogitsLoss()
_UpperCAmelCase = loss_fct(A , A)
if not return_dict:
_UpperCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A , logits=A , hidden_states=outputs.hidden_states)
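# Hedged usage sketch (added; the class above corresponds to upstream's
# ResNetForImageClassification, with names mangled in this dump):
#
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     pixel_values = torch.randn(1, 3, 224, 224)   # illustrative input shape
#     logits = model(pixel_values).logits          # shape (1, num_labels)
#
# Passing integer `labels` of shape (1,) additionally fills `.loss` through
# the single-label cross-entropy branch above.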
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , A , )
class __lowerCAmelCase ( A , A ):
def __init__( self : Dict , A : List[str]) -> Any:
"""simple docstring"""
super().__init__(A)
super()._init_backbone(A)
_UpperCAmelCase = [config.embedding_size] + config.hidden_sizes
_UpperCAmelCase = ResNetEmbeddings(A)
_UpperCAmelCase = ResNetEncoder(A)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A)
@replace_return_docstrings(output_type=A , config_class=_CONFIG_FOR_DOC)
def _lowerCamelCase ( self : Optional[int] , A : Tensor , A : Optional[bool] = None , A : Optional[bool] = None) -> BackboneOutput:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = self.embedder(A)
_UpperCAmelCase = self.encoder(A , output_hidden_states=A , return_dict=A)
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_UpperCAmelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=A , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=A , )
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
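# Illustrative note (added; upstream this helper is named `replace_key`).
# Two representative renamings:
#
#     "vqvae.bottom.k"       -> "vqvae.bottom.codebook"
#     "prior.x_out.weight"   -> "prior.fc_proj_out.weight"
#
# Keys that match none of the patterns are returned unchanged.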
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 1
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase__ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_UpperCAmelCase = bs[:]
_UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
_UpperCAmelCase = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
return pairs
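# Illustrative notes (added): the byte-to-unicode table above maps printable
# bytes to themselves and shifts the rest past code point 255; the space byte
# (0x20) maps to "Ġ", which is why GPT-2-style vocabularies mark a leading
# space with "Ġ". The pair helper extracts adjacent symbols, e.g.
#
#     get_pairs(("l", "o", "w"))  ->  {("l", "o"), ("o", "w")}
#
# (both helpers carry mangled names in this dump; `get_pairs` is the
# upstream name used at the call sites below).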
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , A : int , A : Any , A : Tuple="replace" , A : Any="<s>" , A : List[Any]="</s>" , A : int="</s>" , A : List[Any]="<s>" , A : Any="<unk>" , A : Tuple="<pad>" , A : Optional[Any]="<mask>" , A : str=False , **A : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else bos_token
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else eos_token
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else sep_token
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else cls_token
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else unk_token
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , )
with open(A , encoding='utf-8') as vocab_handle:
_UpperCAmelCase = json.load(A)
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = errors # how to handle errors in decoding
_UpperCAmelCase = bytes_to_unicode()
_UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(A , encoding='utf-8') as merges_handle:
_UpperCAmelCase = merges_handle.read().split('\n')[1:-1]
_UpperCAmelCase = [tuple(merge.split()) for merge in bpe_merges]
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = {}
_UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
return len(self.encoder)
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self : Optional[Any] , A : Dict) -> List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(A)
_UpperCAmelCase = get_pairs(A)
if not pairs:
return token
while True:
_UpperCAmelCase = min(A , key=lambda A: self.bpe_ranks.get(A , float('inf')))
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(A):
try:
_UpperCAmelCase = word.index(A , A)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_UpperCAmelCase = j
if word[i] == first and i < len(A) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCAmelCase = tuple(A)
_UpperCAmelCase = new_word
if len(A) == 1:
break
else:
_UpperCAmelCase = get_pairs(A)
_UpperCAmelCase = ' '.join(A)
_UpperCAmelCase = word
return word
def _lowerCamelCase ( self : Optional[int] , A : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = []
for token in re.findall(self.pat , A):
_UpperCAmelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A).split(' '))
return bpe_tokens
def _lowerCamelCase ( self : Any , A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return self.encoder.get(A , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self : str , A : int) -> Union[str, Any]:
"""simple docstring"""
return self.decoder.get(A)
def _lowerCamelCase ( self : List[Any] , A : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = ''.join(A)
_UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def _lowerCamelCase ( self : Any , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(A , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A) + '\n')
_UpperCAmelCase = 0
with open(A , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!')
_UpperCAmelCase = token_index
writer.write(' '.join(A) + '\n')
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self : Dict , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : str , A : List[int] , A : Optional[List[int]] = None , A : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A)
if token_ids_a is None:
return [1] + ([0] * len(A)) + [1]
return [1] + ([0] * len(A)) + [1, 1] + ([0] * len(A)) + [1]
def _lowerCamelCase ( self : List[str] , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self : Any , A : Optional[int] , A : str=False , **A : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(A) > 0 and not text[0].isspace()):
_UpperCAmelCase = ' ' + text
return (text, kwargs)
def _lowerCamelCase ( self : List[Any] , A : Union[Dict[str, EncodedInput], BatchEncoding] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ) -> dict:
"""simple docstring"""
_UpperCAmelCase = super()._pad(
encoded_inputs=A , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
# Load from model defaults
if return_attention_mask is None:
_UpperCAmelCase = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
_UpperCAmelCase = len(encoded_inputs['global_attention_mask']) != len(A)
if needs_to_be_padded:
_UpperCAmelCase = len(A) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCAmelCase = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCAmelCase = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
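# Illustrative note (added): the override above only pads
# `global_attention_mask`. With right-side padding from length 3 to length 5,
# a mask [0, 0, 1] becomes [0, 0, 1, -1, -1]; `-1` marks padded positions,
# while 0/1 keep their LED meaning of local vs. global attention.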
| 639
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating-point scores are so close that rounding error makes the order
# not guaranteed across python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 1
|
from __future__ import annotations
def A ( _UpperCAmelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCAmelCase , [] , 0 , [0 for i in range(len(_UpperCAmelCase ) )] )
def A ( _UpperCAmelCase : list[int | str] , _UpperCAmelCase : list[int | str] , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCAmelCase ):
print(_UpperCAmelCase )
return
for i in range(len(_UpperCAmelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_UpperCAmelCase , _UpperCAmelCase , index + 1 , _UpperCAmelCase )
current_sequence.pop()
_UpperCAmelCase = False
UpperCAmelCase__ = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCAmelCase__ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
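# Note (added): the two driver calls above print every permutation produced
# by the depth-first search: 4! = 24 lines for [3, 1, 2, 4] and 3! = 6 lines
# for ["A", "B", "C"], each starting from the input order itself.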
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
        _UpperCAmelCase = bool(self.vocab_file)
    def _lowerCamelCase ( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def _lowerCamelCase ( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
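# Hedged illustration (not part of the original file): XLNet appends its special
# tokens at the *end* of the sequence, so a single sequence becomes
# `ids_a + <sep> + <cls>` and a pair becomes `ids_a + <sep> + ids_b + <sep> + <cls>`,
# with segment id 2 reserved for the <cls> slot. The token ids below are
# placeholders, not real vocabulary ids.
def _demo_xlnet_special_token_layout() -> None:
    sep_id, cls_id = 4, 3
    ids_a, ids_b = [10, 11], [20]
    pair = ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]
    segments = len(ids_a + [sep_id]) * [0] + len(ids_b + [sep_id]) * [1] + [2]
    assert pair == [10, 11, 4, 20, 4, 3]
    assert segments == [0, 0, 0, 1, 1, 2]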
| 639
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
UpperCAmelCase__ = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
return sd
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=rename_keys_prefix ) -> str:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_UpperCAmelCase = key
for name_pair in rename_keys_prefix:
_UpperCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
_UpperCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
        # Old BERT code didn't include `decoder.bias`; it was added separately, so copy it over here
_UpperCAmelCase = new_d['cls.predictions.bias']
return new_d
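# Hedged illustration (not from the original script): how the prefix pairs at the
# top of this file rewrite checkpoint keys. The sample key is invented, and only
# the first two rename pairs are inlined here to keep the sketch self-contained.
def _demo_rename_key() -> str:
    sample_key = "bert.bert.encoder.layer.0.attention.self.query.weight"
    for old, new in [("bert.bert", "visual_bert"), ("bert.cls", "cls")]:
        sample_key = sample_key.replace(old, new)
    return sample_key  # "visual_bert.encoder.layer.0.attention.self.query.weight"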
@torch.no_grad()
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
_UpperCAmelCase = 'pretraining'
if "vcr" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048}
elif "vqa" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048}
elif "nlvr" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 1_024}
else:
raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 512}
_UpperCAmelCase = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048}
_UpperCAmelCase = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048, 'num_labels': 3_129}
_UpperCAmelCase = 'vqa'
elif "nlvr" in checkpoint_path:
_UpperCAmelCase = {
'visual_embedding_dim': 1_024,
'num_labels': 2,
}
_UpperCAmelCase = 'nlvr'
_UpperCAmelCase = VisualBertConfig(**_UpperCAmelCase )
# Load State Dict
_UpperCAmelCase = load_state_dict(_UpperCAmelCase )
_UpperCAmelCase = get_new_dict(_UpperCAmelCase , _UpperCAmelCase )
if model_type == "pretraining":
_UpperCAmelCase = VisualBertForPreTraining(_UpperCAmelCase )
elif model_type == "vqa":
_UpperCAmelCase = VisualBertForQuestionAnswering(_UpperCAmelCase )
elif model_type == "nlvr":
_UpperCAmelCase = VisualBertForVisualReasoning(_UpperCAmelCase )
elif model_type == "multichoice":
_UpperCAmelCase = VisualBertForMultipleChoice(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
# Save Checkpoints
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 639
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
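# Hedged usage sketch: the script filename is illustrative, but the flag names
# are taken verbatim from the argparse definitions above.
#   python export_bart_onnx.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --device cpu --output_file_path BART.onnx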
| 639
| 1
|
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=A ):
UpperCamelCase = ['''torch''', '''scipy''']
def __init__( self : Any , *A : Any , **A : str) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['torch', 'scipy'])
@classmethod
def _lowerCamelCase ( cls : Dict , *A : int , **A : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *A : List[Any] , **A : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ['torch', 'scipy'])
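# Usage note (hedged): every access path on this placeholder, the constructor and
# both classmethods, funnels into `requires_backends`, so importing the module
# always succeeds, while constructing or loading the class without torch/scipy
# installed raises an explicit ImportError naming the missing backends.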
| 639
|
def gnome_sort(lst: list) -> list:
    '''simple docstring'''
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
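# Illustrative behaviour (inputs invented): the sort runs in O(n^2) worst case
# and mutates the list in place, e.g.
#   gnome_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
#   gnome_sort([]) -> []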
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 639
| 1
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class __lowerCAmelCase ( A ):
UpperCamelCase = 42
UpperCamelCase = None
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Any=0.999 , _UpperCAmelCase : int="cosine" , ) -> str:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase : int ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase : Tuple ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
_UpperCAmelCase = []
for i in range(_UpperCAmelCase ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
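# Minimal numeric sketch (not from the source): re-deriving the cosine-schedule
# betas by hand as beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), mirroring
# the loop above with num_diffusion_timesteps = 4 and max_beta = 0.999.
def _demo_cosine_betas(num_steps: int = 4) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), 0.999)
        for i in range(num_steps)
    ]  # strictly increasing values, capped at max_beta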
class __lowerCAmelCase ( A , A ):
@register_to_config
def __init__( self : int , A : int = 10_00 , A : str = "fixed_small_log" , A : bool = True , A : Optional[float] = 1.0 , A : str = "epsilon" , A : str = "squaredcos_cap_v2" , ) -> Any:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'')
_UpperCAmelCase = betas_for_alpha_bar(A)
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0)
_UpperCAmelCase = torch.tensor(1.0)
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , A)[::-1].copy())
_UpperCAmelCase = variance_type
def _lowerCamelCase ( self : Optional[int] , A : torch.FloatTensor , A : Optional[int] = None) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _lowerCamelCase ( self : Optional[int] , A : int , A : Union[str, torch.device] = None) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_UpperCAmelCase = (np.arange(0 , A) * step_ratio).round()[::-1].copy().astype(np.intaa)
_UpperCAmelCase = torch.from_numpy(A).to(A)
def _lowerCamelCase ( self : Tuple , A : List[str] , A : Dict=None , A : Optional[Any]=None , A : Union[str, Any]=None) -> List[str]:
"""simple docstring"""
if prev_timestep is None:
_UpperCAmelCase = t - 1
_UpperCAmelCase = self.alphas_cumprod[t]
_UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_UpperCAmelCase = 1 - alpha_prod_t
_UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_UpperCAmelCase = self.betas[t]
else:
_UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_UpperCAmelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_UpperCAmelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_UpperCAmelCase = torch.log(torch.clamp(A , min=1E-20))
_UpperCAmelCase = torch.exp(0.5 * variance)
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_UpperCAmelCase = variance.log()
_UpperCAmelCase = beta.log()
_UpperCAmelCase = (predicted_variance + 1) / 2
_UpperCAmelCase = frac * max_log + (1 - frac) * min_log
return variance
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : Optional[int] = None , A : Any=None , A : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_UpperCAmelCase , _UpperCAmelCase = torch.split(A , sample.shape[1] , dim=1)
else:
_UpperCAmelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
_UpperCAmelCase = t - 1
_UpperCAmelCase = self.alphas_cumprod[t]
_UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_UpperCAmelCase = 1 - alpha_prod_t
_UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_UpperCAmelCase = self.betas[t]
_UpperCAmelCase = self.alphas[t]
else:
_UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
_UpperCAmelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
' for the UnCLIPScheduler.')
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = torch.clamp(
A , -self.config.clip_sample_range , self.config.clip_sample_range)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_UpperCAmelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_UpperCAmelCase = 0
if t > 0:
_UpperCAmelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=A , device=model_output.device)
_UpperCAmelCase = self._get_variance(
A , predicted_variance=A , prev_timestep=A , )
if self.variance_type == "fixed_small_log":
_UpperCAmelCase = variance
elif self.variance_type == "learned_range":
_UpperCAmelCase = (0.5 * variance).exp()
else:
raise ValueError(
F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
' for the UnCLIPScheduler.')
_UpperCAmelCase = variance * variance_noise
_UpperCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=A , pred_original_sample=A)
def _lowerCamelCase ( self : Optional[Any] , A : torch.FloatTensor , A : torch.FloatTensor , A : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype)
_UpperCAmelCase = timesteps.to(original_samples.device)
_UpperCAmelCase = alphas_cumprod[timesteps] ** 0.5
_UpperCAmelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
_UpperCAmelCase = sqrt_alpha_prod.unsqueeze(-1)
_UpperCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
_UpperCAmelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
_UpperCAmelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1)
_UpperCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
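# Hedged sanity check (toy tensors, not from the source): `add_noise` above
# implements x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, so with
# zero noise the sample is simply scaled by sqrt(alpha_bar_t).
def _demo_add_noise_identity() -> None:
    x0 = torch.ones(2, 3)
    eps = torch.zeros(2, 3)
    alpha_bar_t = torch.tensor(0.25)
    xt = alpha_bar_t.sqrt() * x0 + (1 - alpha_bar_t).sqrt() * eps
    assert torch.allclose(xt, 0.5 * x0)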
| 639
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = Counter(_UpperCAmelCase ) & Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
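# Worked example (strings invented): token-level F1 between the prediction
# "the cat sat" and the gold answer "cat sat down". Normalization drops the
# article, leaving tokens ["cat", "sat"] vs ["cat", "sat", "down"], so
# precision = 2/2 = 1.0, recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.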
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
| 639
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCAmelCase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def A ( _UpperCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {}
with open(_UpperCAmelCase , 'r' ) as file:
for line_number, line in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = line.strip()
if line:
_UpperCAmelCase = line.split()
_UpperCAmelCase = line_number
_UpperCAmelCase = words[0]
_UpperCAmelCase = value
return result
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
for attribute in key.split('.' ):
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCAmelCase ):
_UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
_UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
_UpperCAmelCase = value[0]
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCAmelCase ):
_UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
_UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
_UpperCAmelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase = '.'.join([key, hf_param_name] )
else:
_UpperCAmelCase = key
_UpperCAmelCase = value if 'lm_head' in full_key else value[0]
UpperCAmelCase__ = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=None ) -> str:
'''simple docstring'''
_UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(_UpperCAmelCase )[0].split('.' )[-2]
_UpperCAmelCase = mapped_key.replace('*' , _UpperCAmelCase )
if "weight_g" in name:
_UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
_UpperCAmelCase = 'weight_v'
elif "bias" in name:
_UpperCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase = 'weight'
else:
_UpperCAmelCase = None
if hf_dict is not None:
rename_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return is_used
return is_used
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
_UpperCAmelCase = True
else:
_UpperCAmelCase = load_wavaveca_layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = full_name.split('conv_layers.' )[-1]
_UpperCAmelCase = name.split('.' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_UpperCAmelCase = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCAmelCase )
@torch.no_grad()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Union[str, Any]=False ) -> List[str]:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_UpperCAmelCase )
else:
_UpperCAmelCase = WavaVecaConfig()
if is_seq_class:
_UpperCAmelCase = read_txt_into_dict(_UpperCAmelCase )
_UpperCAmelCase = idalabel
_UpperCAmelCase = WavaVecaForSequenceClassification(_UpperCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
feature_extractor.save_pretrained(_UpperCAmelCase )
elif is_finetuned:
if dict_path:
_UpperCAmelCase = Dictionary.load(_UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.eos_index
_UpperCAmelCase = len(target_dict.symbols )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCAmelCase = 0
_UpperCAmelCase = 1
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = WavaVecaCTCTokenizer(
_UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , )
_UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
_UpperCAmelCase = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
_UpperCAmelCase = WavaVecaForCTC(_UpperCAmelCase )
else:
_UpperCAmelCase = WavaVecaForPreTraining(_UpperCAmelCase )
if is_finetuned or is_seq_class:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_UpperCAmelCase = argparse.Namespace(task='audio_pretraining' )
_UpperCAmelCase = fairseq.tasks.setup_task(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCAmelCase )
_UpperCAmelCase = model[0].eval()
recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
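# Hedged CLI sketch: the script filename is illustrative, but the flag names are
# taken from the argparse definitions above.
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned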
| 639
|
def add(first: int, second: int) -> int:
    '''simple docstring'''
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
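# Caveat (a general Python fact, not from the source): Python ints are unbounded,
# so the carry loop above only terminates for non-negative operands, e.g.
#   add(3, 5) -> 8
#   add(0, 7) -> 7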
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCAmelCase__ = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCAmelCase__ = "UperNetConfig"
class __lowerCAmelCase ( nn.Module ):
def __init__( self : List[str] , A : int , A : int , A : Union[int, Tuple[int, int]] , A : Union[int, Tuple[int, int], str] = 0 , A : bool = False , A : Union[int, Tuple[int, int]] = 1 , ) -> None:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Convad(
in_channels=A , out_channels=A , kernel_size=A , padding=A , bias=A , dilation=A , )
_UpperCAmelCase = nn.BatchNormad(A)
_UpperCAmelCase = nn.ReLU()
def _lowerCamelCase ( self : Union[str, Any] , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = self.conv(A)
_UpperCAmelCase = self.batch_norm(A)
_UpperCAmelCase = self.activation(A)
return output
class __lowerCAmelCase ( nn.Module ):
def __init__( self : str , A : int , A : int , A : int) -> None:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = [
nn.AdaptiveAvgPoolad(A),
UperNetConvModule(A , A , kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(A) , A)
def _lowerCamelCase ( self : Any , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = input
for layer in self.layers:
_UpperCAmelCase = layer(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , A : Tuple[int, ...] , A : int , A : int , A : bool) -> None:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = pool_scales
_UpperCAmelCase = align_corners
_UpperCAmelCase = in_channels
_UpperCAmelCase = channels
_UpperCAmelCase = []
for i, pool_scale in enumerate(A):
_UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=A , in_channels=A , channels=A)
self.blocks.append(A)
self.add_module(str(A) , A)
def _lowerCamelCase ( self : str , A : torch.Tensor) -> List[torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = []
for ppm in self.blocks:
_UpperCAmelCase = ppm(A)
_UpperCAmelCase = nn.functional.interpolate(
A , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners)
ppm_outs.append(A)
return ppm_outs
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , A : Tuple , A : List[str]) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = config
_UpperCAmelCase = config.pool_scales # e.g. (1, 2, 3, 6)
_UpperCAmelCase = in_channels
_UpperCAmelCase = config.hidden_size
_UpperCAmelCase = False
_UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1)
# PSP Module
_UpperCAmelCase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
_UpperCAmelCase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
_UpperCAmelCase = nn.ModuleList()
_UpperCAmelCase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
_UpperCAmelCase = UperNetConvModule(A , self.channels , kernel_size=1)
_UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1)
self.lateral_convs.append(A)
self.fpn_convs.append(A)
_UpperCAmelCase = UperNetConvModule(
len(self.in_channels) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
self.apply(self._init_weights)
def _lowerCamelCase ( self : Optional[Any] , A : Dict) -> Any:
"""simple docstring"""
if isinstance(A , nn.Convad):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _lowerCamelCase ( self : Tuple , A : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = inputs[-1]
_UpperCAmelCase = [x]
psp_outs.extend(self.psp_modules(A))
_UpperCAmelCase = torch.cat(A , dim=1)
_UpperCAmelCase = self.bottleneck(A)
return output
def _lowerCamelCase ( self : List[str] , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(A))
# build top-down path
_UpperCAmelCase = len(A)
for i in range(used_backbone_levels - 1 , 0 , -1):
_UpperCAmelCase = laterals[i - 1].shape[2:]
_UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=A , mode='bilinear' , align_corners=self.align_corners)
# build outputs
_UpperCAmelCase = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1 , 0 , -1):
_UpperCAmelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners)
_UpperCAmelCase = torch.cat(A , dim=1)
_UpperCAmelCase = self.fpn_bottleneck(A)
_UpperCAmelCase = self.classifier(A)
return output
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Any , A : List[str] , A : int = 2 , A : int = 3 , A : Union[int, Tuple[int, int]] = 1) -> None:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = config
_UpperCAmelCase = config.auxiliary_in_channels
_UpperCAmelCase = config.auxiliary_channels
_UpperCAmelCase = config.auxiliary_num_convs
_UpperCAmelCase = config.auxiliary_concat_input
_UpperCAmelCase = in_index
_UpperCAmelCase = (kernel_size // 2) * dilation
_UpperCAmelCase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=A , padding=A , dilation=A))
for i in range(self.num_convs - 1):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=A , padding=A , dilation=A))
if self.num_convs == 0:
_UpperCAmelCase = nn.Identity()
else:
_UpperCAmelCase = nn.Sequential(*A)
if self.concat_input:
_UpperCAmelCase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=A , padding=kernel_size // 2)
_UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1)
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights)
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[Any]) -> Any:
"""simple docstring"""
if isinstance(A , nn.Convad):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _lowerCamelCase ( self : Tuple , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = encoder_hidden_states[self.in_index]
_UpperCAmelCase = self.convs(A)
if self.concat_input:
_UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1))
_UpperCAmelCase = self.classifier(A)
return output
class __lowerCAmelCase ( A ):
UpperCamelCase = UperNetConfig
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def _lowerCamelCase ( self : Any , A : str) -> Dict:
"""simple docstring"""
if isinstance(A , A):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowerCamelCase ( self : str , A : str , A : int=False) -> Dict:
"""simple docstring"""
if isinstance(A , A):
_UpperCAmelCase = value
UpperCAmelCase__ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , A , )
class __lowerCAmelCase ( A ):
def __init__( self : Dict , A : Optional[int]) -> int:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = AutoBackbone.from_config(config.backbone_config)
# Semantic segmentation head(s)
_UpperCAmelCase = UperNetHead(A , in_channels=self.backbone.channels)
_UpperCAmelCase = UperNetFCNHead(A) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@replace_return_docstrings(output_type=A , config_class=_CONFIG_FOR_DOC)
def _lowerCamelCase ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
_UpperCAmelCase = self.backbone.forward_with_filtered_kwargs(
A , output_hidden_states=A , output_attentions=A)
_UpperCAmelCase = outputs.feature_maps
_UpperCAmelCase = self.decode_head(A)
_UpperCAmelCase = nn.functional.interpolate(A , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = None
if self.auxiliary_head is not None:
_UpperCAmelCase = self.auxiliary_head(A)
_UpperCAmelCase = nn.functional.interpolate(
A , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
else:
# compute weighted loss
_UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
_UpperCAmelCase = loss_fct(A , A)
_UpperCAmelCase = loss_fct(A , A)
_UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_UpperCAmelCase = (logits,) + outputs[1:]
else:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=A , logits=A , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
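# Hedged usage sketch: class and checkpoint names follow the public transformers
# API and the checkpoint list at the top of this file; `image` is assumed to be a
# PIL image and pretrained weights are downloaded in practice.
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width)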
| 639
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1) -> complex:
    '''simple docstring'''
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 1
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
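# Worked example (added): 25 is 0b011001 and 32 is 0b100000 once the shorter string is
# zero-filled to the common width, so the digit-wise XOR above yields 0b111001 (== 57).
#   A(25, 32) -> "0b111001", and int(A(25, 32), 2) == 25 ^ 32 == 57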
| 639
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
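# Standalone sketch (added; not the class API above, just the default pipeline restated
# in plain numpy, with the shortest-edge resize step omitted for brevity):
# 224x224 center crop -> 1/255 rescale -> normalize with the "standard" 0.5 mean/std
# -> channels-first layout, matching ChannelDimension.FIRST.
import numpy as np
def sketch_default_pipeline(image: np.ndarray) -> np.ndarray:
    # image: (H, W, 3) uint8 with H, W >= 224 (resize to shortest_edge=256 assumed done)
    scaled = image.astype(np.float32) / 255.0
    h, w = scaled.shape[:2]
    top, left = (h - 224) // 2, (w - 224) // 2
    cropped = scaled[top : top + 224, left : left + 224]
    normalized = (cropped - 0.5) / 0.5  # IMAGENET_STANDARD mean/std are all 0.5
    return normalized.transpose(2, 0, 1)  # (3, 224, 224)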
| 639
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase__ = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
UpperCAmelCase__ = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def A ( _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = None
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(_UpperCAmelCase )
_UpperCAmelCase = _re_checkpoint.findall(_UpperCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
_UpperCAmelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = ckpt_name
break
return checkpoint
def A ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_UpperCAmelCase = get_checkpoint_from_config_class(_UpperCAmelCase )
_UpperCAmelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
_UpperCAmelCase = '\n'.join(sorted(_UpperCAmelCase ) )
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
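# Illustration (added) of what the compiled checkpoint regex captures: the display
# name and the huggingface.co URL of a markdown-style link found in a config docstring.
import re
_sketch_re = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
assert _sketch_re.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)") == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]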
| 639
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
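# Reference sketch (added; a hypothetical restatement of the knapsack module under
# test): the classic 0/1 recurrence -- either skip item n-1, or take it if it fits.
def knapsack_sketch(capacity: int, weights: list[int], values: list[int], n: int) -> int:
    if n == 0 or capacity == 0:
        return 0
    if weights[n - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, n - 1)
    return max(
        values[n - 1] + knapsack_sketch(capacity - weights[n - 1], weights, values, n - 1),
        knapsack_sketch(capacity, weights, values, n - 1),
    )
assert knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the 220 test case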
| 639
| 1
|
from __future__ import annotations
UpperCAmelCase__ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def A ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def A ( _UpperCAmelCase : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
_UpperCAmelCase = 0
return None
def A ( _UpperCAmelCase : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 639
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 1
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
UpperCAmelCase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase__ = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (even though we don't have a training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` is used during training (even though we don't have a training script for these models yet)
    # `norm` is used in the conversion script (even though it is not used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
_UpperCAmelCase = True
# Deal with multi-line cases
elif (
re.search(
RF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _UpperCAmelCase , )
is not None
):
_UpperCAmelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_UpperCAmelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_UpperCAmelCase = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
_UpperCAmelCase = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
_UpperCAmelCase = True
if not attribute_used:
_UpperCAmelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_UpperCAmelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_UpperCAmelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_UpperCAmelCase = True
elif attribute.endswith('_token_id' ):
_UpperCAmelCase = True
# configuration class specific cases
if not case_allowed:
_UpperCAmelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
_UpperCAmelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A ( _UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = dict(inspect.signature(config_class.__init__ ).parameters )
_UpperCAmelCase = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
_UpperCAmelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_UpperCAmelCase = {}
if len(config_class.attribute_map ) > 0:
_UpperCAmelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_UpperCAmelCase = inspect.getsourcefile(_UpperCAmelCase )
_UpperCAmelCase = os.path.dirname(_UpperCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_UpperCAmelCase = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for fn in os.listdir(_UpperCAmelCase ) if fn.startswith('modeling_' )]
# Get the source code strings
_UpperCAmelCase = []
for path in modeling_paths:
if os.path.isfile(_UpperCAmelCase ):
with open(_UpperCAmelCase ) as fp:
modeling_sources.append(fp.read() )
_UpperCAmelCase = []
for config_param, default_value in zip(_UpperCAmelCase , _UpperCAmelCase ):
# `attributes` here is all the variant names for `config_param`
_UpperCAmelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(_UpperCAmelCase )
def A ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_UpperCAmelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _UpperCAmelCase : inspect.isclass(_UpperCAmelCase )
and issubclass(_UpperCAmelCase , _UpperCAmelCase )
and inspect.getmodule(_UpperCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
_UpperCAmelCase = check_config_attributes_being_used(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
_UpperCAmelCase = unused_attributes
if len(_UpperCAmelCase ) > 0:
_UpperCAmelCase = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
check_config_attributes()
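# Added illustration of the multi-line branch above: a getattr call split across
# several lines still counts as a use of the attribute thanks to the re.search check.
import re
_source = 'x = getattr(\n    self.config,\n    "hidden_size",\n)'
_pattern = r"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"hidden_size\""
assert re.search(_pattern, _source) is not None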
| 639
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
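# Example invocation (added; the script name and paths are hypothetical placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model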
| 639
| 1
|
def A ( _UpperCAmelCase : int ) -> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('Input must be a positive integer' )
_UpperCAmelCase = [True] * (num + 1)
_UpperCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , _UpperCAmelCase ):
_UpperCAmelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
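# Worked example (added; a self-contained restatement of the sieve above): for num=10
# the multiples of 2 and 3 starting at p*p (4, 6, 8, 10 and 9) are struck out,
# leaving the primes [2, 3, 5, 7].
def sieve_sketch(num: int) -> list[int]:
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [n for n in range(2, num + 1) if primes[n]]
assert sieve_sketch(10) == [2, 3, 5, 7]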
| 639
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 1
|
import socket
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_UpperCAmelCase = socket.gethostname()
_UpperCAmelCase = 12_312
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
_UpperCAmelCase = sock.recv(1_024 )
if not data:
break
out_file.write(_UpperCAmelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
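# Companion server sketch (added, hypothetical -- not part of this file): the client
# above expects a peer on port 12_312 that accepts one connection, consumes the
# greeting, and streams a file's bytes in 1_024-byte chunks until EOF.
import socket
def serve_file(path: str) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12_312))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1_024)  # consume the client's "Hello server!" greeting
    with open(path, "rb") as in_file:
        while chunk := in_file.read(1_024):
            conn.send(chunk)
    conn.close()
    server.close()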
| 639
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
for word in [sum(ord(_UpperCAmelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
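# Worked example (added): "SKY" scores 19 + 11 + 25 = 55, and 55 = (1/2) * 10 * 11 is
# the 10th triangular number, so "SKY" counts toward the total.
#   sum(ord(ch) - 64 for ch in "SKY") == 55, and 55 in TRIANGULAR_NUMBERS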
| 639
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 639
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 1
|
import requests
UpperCAmelCase__ = "" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = "https://api.openweathermap.org/data/2.5/"
def A ( _UpperCAmelCase : str = "Chicago" , _UpperCAmelCase : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def A ( _UpperCAmelCase : str = "Kolkata, India" , _UpperCAmelCase : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def A ( _UpperCAmelCase : float = 55.68 , _UpperCAmelCase : float = 12.57 , _UpperCAmelCase : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
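# Usage sketch (added; requires filling in APPID first): a call like
#   current_weather("Kolkata, India")
# serializes the function's locals() as the query string, issuing
#   GET https://api.openweathermap.org/data/2.5/weather?...&appid=<APPID>
# and returns the decoded JSON payload as a dict.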
| 639
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ = 5
UpperCAmelCase__ = 10
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = SpeechaTextTokenizer
UpperCamelCase = False
UpperCamelCase = True
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
super().setUp()
_UpperCAmelCase = sp.SentencePieceProcessor()
spm_model.Load(A)
_UpperCAmelCase = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(A))]
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = Path(self.tmpdirname)
save_json(A , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(A , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A) , A)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A) , A)
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(A) , 10_01)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCAmelCase = tokenizer.tokenize('This is a test')
self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
A , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
self.assertListEqual(A , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertListEqual(
A , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = '''valhalla/s2t_mustc_multilinguial_medium'''
UpperCamelCase = '''C\'est trop cool'''
UpperCamelCase = '''Esto es genial'''
@classmethod
def _lowerCamelCase ( cls : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
self.assertIn(A , self.tokenizer.all_special_ids)
_UpperCAmelCase = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCAmelCase = self.tokenizer.decode(A , skip_special_tokens=A)
_UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A)
self.assertEqual(A , A)
self.assertNotIn(self.tokenizer.eos_token , A)
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'fr'
_UpperCAmelCase = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , A)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCAmelCase = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
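# Added note (interpretation): for the multilingual checkpoint, assigning the target
# language (e.g. "fr") makes the tokenizer prepend that language's id as a prefix
# token, and every encoded sequence ends with eos_token_id -- which is exactly what
# the two tests above assert.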
| 639
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
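# Added note (interpretation, hedged): each ref-file line is parsed with json.loads and
# the resulting per-example values are attached to the dataset (originally under a
# "chinese_ref" column) so that DataCollatorForWholeWordMask can group sub-word pieces
# into whole words when sampling masked positions.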
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep them from being removed by the Trainer's column pruning
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
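        # Perplexity is the exponential of the mean eval cross-entropy loss.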
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 639
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCAmelCase__ = "src/transformers"
UpperCAmelCase__ = "docs/source/en/tasks"
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCAmelCase = f.readlines()
# Find the start prompt.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_UpperCAmelCase ):
start_index += 1
start_index += 1
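    # Find the end prompt.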
_UpperCAmelCase = start_index
while not lines[end_index].startswith(_UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
UpperCAmelCase__ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
UpperCAmelCase__ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def A ( _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
_UpperCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCAmelCase , set() )
_UpperCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=False ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _find_text_in_file(
filename=os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
_UpperCAmelCase = get_model_list_for_task(_UpperCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
' to fix this.' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 639
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
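        # XLNet appends its special tokens at the end: "X <sep> <cls>" for a single
        # sequence, "A <sep> B <sep> <cls>" for a pair.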
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
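        # Token type ids: sequence A -> 0, sequence B -> 1, and the trailing <cls>
        # gets its own segment id (2).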
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
| 639
| 1
|
def A ( _UpperCAmelCase : int = 4_000_000 ) -> int:
'''simple docstring'''
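    # Sum the even-valued Fibonacci terms not exceeding n (Project Euler problem 2).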
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = b, a + b
return sum(_UpperCAmelCase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(A )
class __lowerCAmelCase ( A ):
def __init__( self : Optional[Any] , **A : Optional[Any]) -> Optional[int]:
"""simple docstring"""
super().__init__(**A)
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch.")
requires_backends(self , 'vision')
self.check_model_type(A)
def __call__( self : str , A : Union[str, "Image.Image", List[Dict[str, Any]]] , A : Union[str, List[str]] = None , **A : Dict , ) -> Optional[int]:
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCAmelCase = kwargs.pop('text_queries')
if isinstance(A , (str, Image.Image)):
_UpperCAmelCase = {'image': image, 'candidate_labels': candidate_labels}
else:
_UpperCAmelCase = image
_UpperCAmelCase = super().__call__(A , **A)
return results
def _lowerCamelCase ( self : Dict , **A : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs['threshold']
if "top_k" in kwargs:
_UpperCAmelCase = kwargs['top_k']
return {}, {}, postprocess_params
def _lowerCamelCase ( self : List[Any] , A : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = load_image(inputs['image'])
_UpperCAmelCase = inputs['candidate_labels']
if isinstance(A , A):
_UpperCAmelCase = candidate_labels.split(',')
        _UpperCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
for i, candidate_label in enumerate(A):
_UpperCAmelCase = self.tokenizer(A , return_tensors=self.framework)
_UpperCAmelCase = self.image_processor(A , return_tensors=self.framework)
yield {
"is_last": i == len(A) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _lowerCamelCase ( self : str , A : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = model_inputs.pop('target_size')
_UpperCAmelCase = model_inputs.pop('candidate_label')
_UpperCAmelCase = model_inputs.pop('is_last')
_UpperCAmelCase = self.model(**A)
_UpperCAmelCase = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def _lowerCamelCase ( self : str , A : List[str] , A : Dict=0.1 , A : Optional[Any]=None) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
for model_output in model_outputs:
_UpperCAmelCase = model_output['candidate_label']
_UpperCAmelCase = BaseModelOutput(A)
_UpperCAmelCase = self.image_processor.post_process_object_detection(
outputs=A , threshold=A , target_sizes=model_output['target_size'])[0]
for index in outputs["scores"].nonzero():
_UpperCAmelCase = outputs['scores'][index].item()
_UpperCAmelCase = self._get_bounding_box(outputs['boxes'][index][0])
_UpperCAmelCase = {'score': score, 'label': label, 'box': box}
results.append(A)
        _UpperCAmelCase = sorted(A , key=lambda x: x["score"] , reverse=A)
if top_k:
_UpperCAmelCase = results[:top_k]
return results
def _lowerCamelCase ( self : Tuple , A : "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 639
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
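    # MD5 of the whitespace-normalized content, used later for exact deduplication.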
    return {"hash": hashlib.md5(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
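    # for-else: reached only when no keyword was found in the lines checked above.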
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
    # First test: look for the keywords in the first few lines of the file.
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
    # Second test: count occurrences of "config" and "test" across the whole file and compare against a length-proportional threshold.
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
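    # Characters per tokenizer token; files below args.min_token_ratio are filtered out later.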
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with MinHash and Jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicated dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 1
|
import sys
import turtle
def A ( _UpperCAmelCase : tuple[float, float] , _UpperCAmelCase : tuple[float, float] ) -> tuple[float, float]:
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def A ( _UpperCAmelCase : tuple[float, float] , _UpperCAmelCase : tuple[float, float] , _UpperCAmelCase : tuple[float, float] , _UpperCAmelCase : int , ) -> None:
'''simple docstring'''
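    # Draw the current triangle, then recurse on the three corner sub-triangles
    # (Sierpinski construction).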
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase ) , get_mid(_UpperCAmelCase , _UpperCAmelCase ) , depth - 1 )
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase ) , get_mid(_UpperCAmelCase , _UpperCAmelCase ) , depth - 1 )
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase ) , get_mid(_UpperCAmelCase , _UpperCAmelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
UpperCAmelCase__ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
UpperCAmelCase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
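    # Map original OpenAI Jukebox checkpoint key names onto the Transformers naming scheme.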
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key}: shapes {val.shape} and {value.shape} do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 1
|
def A ( _UpperCAmelCase : list ) -> list:
'''simple docstring'''
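    # Gnome sort: step forward while adjacent items are ordered, otherwise swap and
    # step back. O(n^2) worst case, O(n) on already-sorted input.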
if len(_UpperCAmelCase ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_UpperCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1]
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 639
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
        # The floating-point scores are so close that rounding error makes their ordering
        # non-deterministic across Python and Torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 1
|
from heapq import heappop, heappush
import numpy as np
def A ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : tuple[int, int] , _UpperCAmelCase : tuple[int, int] , _UpperCAmelCase : bool , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
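    # Dijkstra over a 0/1 grid (1 = walkable). Every edge costs 1, so the heap
    # effectively performs a breadth-first search.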
_UpperCAmelCase , _UpperCAmelCase = grid.shape
_UpperCAmelCase = [-1, 1, 0, 0]
_UpperCAmelCase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_UpperCAmelCase , _UpperCAmelCase = [(0, source)], set()
_UpperCAmelCase = np.full((rows, cols) , np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = np.empty((rows, cols) , dtype=_UpperCAmelCase )
_UpperCAmelCase = None
while queue:
((_UpperCAmelCase) , (_UpperCAmelCase)) = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_UpperCAmelCase = []
while (x, y) != source:
path.append((x, y) )
_UpperCAmelCase , _UpperCAmelCase = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
_UpperCAmelCase , _UpperCAmelCase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_UpperCAmelCase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase , (dist + 1, (nx, ny)) )
_UpperCAmelCase = dist + 1
_UpperCAmelCase = (x, y)
return np.inf, []
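# Minimal usage sketch (illustrative grid, not part of the original module):
# >>> grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
# >>> distance, path = A(grid, (0, 0), (2, 0), allow_diagonal=False)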
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def A ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 9
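    # Undirected weighted graph given as [u, v, weight] edge triples.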
_UpperCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCAmelCase = kruskal(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_UpperCAmelCase ) == sorted(_UpperCAmelCase )
| 639
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
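    # Count only parameters that require gradients.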
_UpperCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"
            ' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def A ( _UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCAmelCase = model_type_to_module_name(_UpperCAmelCase )
_UpperCAmelCase = importlib.import_module(F".{module_name}" , 'transformers.models' )
try:
return getattr(_UpperCAmelCase , _UpperCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_UpperCAmelCase , '__name__' , _UpperCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCAmelCase = importlib.import_module('transformers' )
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
return getattr(_UpperCAmelCase , _UpperCAmelCase )
return None
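# Hedged usage sketch for the resolver above: the class name must appear in
# FEATURE_EXTRACTOR_MAPPING_NAMES or have been registered; whether the returned
# class is importable depends on the optional vision/audio extras. Illustrative:
# feature_extractor_class_from_name('ViTFeatureExtractor')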
def A ( _UpperCAmelCase : Union[str, os.PathLike] , _UpperCAmelCase : Optional[Union[str, os.PathLike]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[Dict[str, str]] = None , _UpperCAmelCase : Optional[Union[bool, str]] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Tuple , ) -> str:
'''simple docstring'''
_UpperCAmelCase = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(_UpperCAmelCase , encoding='utf-8' ) as reader:
return json.load(_UpperCAmelCase )
class __lowerCAmelCase :
def __init__( self : Tuple) -> Tuple:
"""simple docstring"""
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(A)
def _lowerCamelCase ( cls : Tuple , A : Union[str, Any] , **A : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = kwargs.pop('config' , A)
_UpperCAmelCase = kwargs.pop('trust_remote_code' , A)
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(A , **A)
_UpperCAmelCase = config_dict.get('feature_extractor_type' , A)
_UpperCAmelCase = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}):
_UpperCAmelCase = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A):
_UpperCAmelCase = AutoConfig.from_pretrained(A , **A)
            # It could be in `config.feature_extractor_type`
_UpperCAmelCase = getattr(A , 'feature_extractor_type' , A)
if hasattr(A , 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
_UpperCAmelCase = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
_UpperCAmelCase = feature_extractor_class_from_name(A)
_UpperCAmelCase = feature_extractor_auto_map is not None
_UpperCAmelCase = feature_extractor_class is not None or type(A) in FEATURE_EXTRACTOR_MAPPING
_UpperCAmelCase = resolve_trust_remote_code(
A , A , A , A)
if has_remote_code and trust_remote_code:
_UpperCAmelCase = get_class_from_dynamic_module(
A , A , **A)
_UpperCAmelCase = kwargs.pop('code_revision' , A)
if os.path.isdir(A):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A) in FEATURE_EXTRACTOR_MAPPING:
_UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(A)]
return feature_extractor_class.from_dict(A , **A)
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
@staticmethod
def _lowerCamelCase ( A : int , A : Any) -> Union[str, Any]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(A , A)
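# Hedged usage sketch for the Auto class above (the checkpoint name is
# illustrative and needs network access plus the relevant audio extras):
# feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
# Custom extractors can be resolved too, once registered against their config:
# AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)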
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
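# Example invocation (the script name and output path are illustrative; the
# beam/length values match the defaults set in main()):
# python run_bart_onnx_export.py --model_name_or_path facebook/bart-base \
#     --num_beams 4 --max_length 5 --device cpu --output_file_path BART.onnx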
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def A ( _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_UpperCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]  # swap the out-of-order neighbours in place
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
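# Quick sanity checks (they rely on the in-place swap above):
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []
assert gnome_sort([5, 4, 4, 1]) == [1, 4, 4, 5]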
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
def A ( _UpperCAmelCase : int = 50 ) -> int:
'''simple docstring'''
_UpperCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
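# Small worked cases for the recurrence above (tiles of length 2-4 plus unit
# squares): f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1,
# hence f(2) = 2, f(3) = 4, f(4) = 8.
assert solution(1) == 1 and solution(2) == 2 and solution(3) == 4 and solution(4) == 8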
if __name__ == "__main__":
print(f"""{solution() = }""")
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
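# Minimal illustration of the column trimming performed by trim_batch above
# (the pad token id is assumed to be 0 for this sketch):
_example_batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
_kept_columns = _example_batch.ne(0).any(dim=0)  # keep columns holding any real token
assert _example_batch[:, _kept_columns].tolist() == [[5, 6], [7, 0]]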
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = Counter(_UpperCAmelCase ) & Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
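# Worked example for the token-level F1 above. Normalization lowercases, strips
# punctuation and articles, so a prediction "The cat sat" reduces to the tokens
# below; against gold "cat sat down" this gives precision 1.0, recall 2/3, F1 0.8:
_pred, _gold = ['cat', 'sat'], ['cat', 'sat', 'down']
_same = sum((Counter(_pred) & Counter(_gold)).values())  # 2 shared tokens
_p, _r = _same / len(_pred), _same / len(_gold)
assert abs(2 * _p * _r / (_p + _r) - 0.8) < 1E-9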
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Dict , A : List[Any] , A : str=7 , A : Tuple=3 , A : Dict=18 , A : int=30 , A : Optional[Any]=4_00 , A : int=True , A : List[Any]=None , A : int=True , ) -> int:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = apply_ocr
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCAmelCase = LayoutLMvaImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A , 'do_resize'))
self.assertTrue(hasattr(A , 'size'))
self.assertTrue(hasattr(A , 'apply_ocr'))
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A)
for image in image_inputs:
self.assertIsInstance(A , Image.Image)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A)
self.assertIsInstance(encoding.boxes , A)
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A)
for image in image_inputs:
self.assertIsInstance(A , np.ndarray)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A)
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
_UpperCAmelCase = Image.open(ds[0]['file']).convert('RGB')
_UpperCAmelCase = image_processing(A , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_UpperCAmelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A)
self.assertListEqual(encoding.boxes , A)
# with apply_OCR = False
_UpperCAmelCase = LayoutLMvaImageProcessor(apply_ocr=A)
_UpperCAmelCase = image_processing(A , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1  # propagate the carry one bit to the left
return first
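# Trace of add(5, 3) with the carry propagation above:
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0, first=0b1000, second=0 -> returns 8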
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A ( _UpperCAmelCase : int = 3 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
    if isinstance(number_of_qubits , str ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(_UpperCAmelCase ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate (>10).' )
_UpperCAmelCase = QuantumRegister(_UpperCAmelCase , 'qr' )
_UpperCAmelCase = ClassicalRegister(_UpperCAmelCase , 'cr' )
_UpperCAmelCase = QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = number_of_qubits
for i in range(_UpperCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_UpperCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _UpperCAmelCase , _UpperCAmelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_UpperCAmelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_UpperCAmelCase , _UpperCAmelCase )
# simulate with 10000 shots
_UpperCAmelCase = Aer.get_backend('qasm_simulator' )
_UpperCAmelCase = execute(_UpperCAmelCase , _UpperCAmelCase , shots=10_000 )
return job.result().get_counts(_UpperCAmelCase )
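# Sanity expectation (hedged): from the all-zeros initial state the circuit above
# produces a uniform superposition, so with 10000 shots each of the 2**n outcomes
# should appear roughly 10000 / 2**n times (about 1250 per outcome for n = 3).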
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( _UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ) -> complex:
'''simple docstring'''
_UpperCAmelCase = symbols(_UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCAmelCase__ = 0
UpperCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCAmelCase__ = tuple[int, int]
class __lowerCAmelCase :
def __init__( self : str , A : int , A : int , A : int , A : int , A : int , A : Node | None , ) -> None:
"""simple docstring"""
_UpperCAmelCase = pos_x
_UpperCAmelCase = pos_y
_UpperCAmelCase = (pos_y, pos_x)
_UpperCAmelCase = goal_x
_UpperCAmelCase = goal_y
_UpperCAmelCase = g_cost
_UpperCAmelCase = parent
_UpperCAmelCase = self.calculate_heuristic()
_UpperCAmelCase = self.g_cost + self.h_cost
def _lowerCamelCase ( self : Tuple) -> float:
"""simple docstring"""
_UpperCAmelCase = self.pos_x - self.goal_x
_UpperCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A) + abs(A)
else:
return sqrt(dy**2 + dx**2)
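    # Note (hedged): with unit step costs on a 4-connected grid both heuristics are
    # admissible; Manhattan distance equals the obstacle-free move count and the
    # Euclidean distance never exceeds it, so A* stays optimal under either setting.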
def __lt__( self : Tuple , A : Node) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class __lowerCAmelCase :
def __init__( self : str , A : TPosition , A : TPosition) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , A)
_UpperCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , A)
_UpperCAmelCase = [self.start]
_UpperCAmelCase = []
_UpperCAmelCase = False
def _lowerCamelCase ( self : Optional[int]) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_UpperCAmelCase = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(A)
self.closed_nodes.append(A)
_UpperCAmelCase = self.get_successors(A)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A)
else:
# retrieve the best current path
_UpperCAmelCase = self.open_nodes.pop(self.open_nodes.index(A))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A)
else:
self.open_nodes.append(A)
return [self.start.pos]
def _lowerCamelCase ( self : str , A : Node) -> list[Node]:
"""simple docstring"""
_UpperCAmelCase = []
for action in delta:
_UpperCAmelCase = parent.pos_x + action[1]
_UpperCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(A) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A , A , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , A , ))
return successors
def _lowerCamelCase ( self : List[Any] , A : Node | None) -> list[TPosition]:
"""simple docstring"""
_UpperCAmelCase = node
_UpperCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
_UpperCAmelCase = current_node.parent
path.reverse()
return path
class __lowerCAmelCase :
def __init__( self : List[str] , A : TPosition , A : TPosition) -> None:
"""simple docstring"""
_UpperCAmelCase = AStar(A , A)
_UpperCAmelCase = AStar(A , A)
_UpperCAmelCase = False
def _lowerCamelCase ( self : Optional[int]) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_UpperCAmelCase = self.fwd_astar.open_nodes.pop(0)
_UpperCAmelCase = self.bwd_astar.open_nodes.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A , A)
self.fwd_astar.closed_nodes.append(A)
self.bwd_astar.closed_nodes.append(A)
_UpperCAmelCase = current_bwd_node
_UpperCAmelCase = current_fwd_node
_UpperCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A),
self.bwd_astar: self.bwd_astar.get_successors(A),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A)
else:
# retrieve the best current path
_UpperCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(A))
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A)
else:
astar.open_nodes.append(A)
return [self.fwd_astar.start.pos]
def _lowerCamelCase ( self : str , A : Node , A : Node) -> list[TPosition]:
"""simple docstring"""
_UpperCAmelCase = self.fwd_astar.retrace_path(A)
_UpperCAmelCase = self.bwd_astar.retrace_path(A)
bwd_path.pop()
bwd_path.reverse()
_UpperCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCAmelCase__ = (0, 0)
UpperCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = AStar(init, goal)
UpperCAmelCase__ = a_star.search()
UpperCAmelCase__ = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
UpperCAmelCase__ = time.time()
    UpperCAmelCase__ = BidirectionalAStar(init, goal)
    UpperCAmelCase__ = bidirectional_a_star.search()  # run the search so the timing below measures real work; name assumed by analogy with a_star above
    UpperCAmelCase__ = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
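# Hedged usage sketch for the post-processing above (the model, the image and the
# method name post_process_semantic_segmentation follow the usual transformers
# pattern and are assumptions here):
# inputs = image_processor(images=image, return_tensors='pt')
# outputs = segmentation_model(**inputs)
# seg_maps = image_processor.post_process_semantic_segmentation(
#     outputs, target_sizes=[image.size[::-1]])  # one (H, W) map of class ids per image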
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
_UpperCAmelCase = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_60_00,
'return_attention_mask': False,
'do_normalize': True,
}
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , A)
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
with open(self.feature_extraction_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
# load decoder from hub
_UpperCAmelCase = 'hf-internal-testing/ngram-beam-search-decoder'
def _lowerCamelCase ( self : Optional[int] , **A : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A)
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , **A : Tuple) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Union[str, Any] , **A : List[str]) -> Optional[int]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A)
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _lowerCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
processor.save_pretrained(self.tmpdirname)
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , A)
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor , A)
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels)
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A)
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
processor.save_pretrained(self.tmpdirname)
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3)
# decoder
self.assertEqual(processor.language_model.alpha , 5.0)
self.assertEqual(processor.language_model.beta , 3.0)
self.assertEqual(processor.language_model.score_boundary , -7.0)
self.assertEqual(processor.language_model.unk_score_offset , 3)
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'])
with self.assertRaisesRegex(A , 'include'):
WavaVecaProcessorWithLM(
tokenizer=A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
_UpperCAmelCase = floats_list((3, 10_00))
_UpperCAmelCase = feature_extractor(A , return_tensors='np')
_UpperCAmelCase = processor(A , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
_UpperCAmelCase = 'This is a test string'
_UpperCAmelCase = processor(text=A)
_UpperCAmelCase = tokenizer(A)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int]=(2, 10, 16) , A : List[Any]=77) -> int:
"""simple docstring"""
np.random.seed(A)
return np.random.rand(*A)
def _lowerCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
_UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13)
_UpperCAmelCase = processor.decode(A)
_UpperCAmelCase = decoder.decode_beams(A)[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text)
self.assertEqual('</s> <s> </s>' , decoded_processor.text)
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score)
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']])
def _lowerCamelCase ( self : str , A : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
_UpperCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase = processor.batch_decode(A)
else:
with get_context(A).Pool() as pool:
_UpperCAmelCase = processor.batch_decode(A , A)
_UpperCAmelCase = list(A)
with get_context('fork').Pool() as p:
_UpperCAmelCase = decoder.decode_beams_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0])
logit_scores_decoder.append(beams[0][-2])
lm_scores_decoder.append(beams[0][-1])
self.assertListEqual(A , decoded_processor.text)
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text)
self.assertListEqual(A , decoded_processor.logit_score)
self.assertListEqual(A , decoded_processor.lm_score)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = 15
_UpperCAmelCase = -2_0.0
_UpperCAmelCase = -4.0
_UpperCAmelCase = processor.batch_decode(
A , beam_width=A , beam_prune_logp=A , token_min_logp=A , )
_UpperCAmelCase = decoded_processor_out.text
_UpperCAmelCase = list(A)
with get_context('fork').Pool() as pool:
_UpperCAmelCase = decoder.decode_beams_batch(
A , A , beam_width=A , beam_prune_logp=A , token_min_logp=A , )
_UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A , A)
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , A)
self.assertTrue(np.array_equal(A , decoded_processor_out.logit_score))
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , A , atol=1E-3))
self.assertTrue(np.array_equal(A , decoded_processor_out.lm_score))
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , A , atol=1E-3))
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = 2.0
_UpperCAmelCase = 5.0
_UpperCAmelCase = -2_0.0
_UpperCAmelCase = True
_UpperCAmelCase = processor.batch_decode(
A , alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , )
_UpperCAmelCase = decoded_processor_out.text
_UpperCAmelCase = list(A)
decoder.reset_params(
alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , )
with get_context('fork').Pool() as pool:
_UpperCAmelCase = decoder.decode_beams_batch(
A , A , )
_UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A , A)
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , A)
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0)
self.assertEqual(lm_model.beta , 5.0)
self.assertEqual(lm_model.unk_score_offset , -2_0.0)
self.assertEqual(lm_model.score_boundary , A)
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
_UpperCAmelCase = os.listdir(A)
_UpperCAmelCase = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A , A)
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
_UpperCAmelCase = snapshot_download('hf-internal-testing/processor_with_lm')
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(A)
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
_UpperCAmelCase = os.listdir(A)
_UpperCAmelCase = os.listdir(A)
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A , A)
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_UpperCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm')
_UpperCAmelCase = floats_list((3, 10_00))
_UpperCAmelCase = processor_wavaveca(A , return_tensors='np')
_UpperCAmelCase = processor_auto(A , return_tensors='np')
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2)
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = processor_wavaveca.batch_decode(A)
_UpperCAmelCase = processor_auto.batch_decode(A)
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text)
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A)
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def _lowerCamelCase ( A : List[str] , A : List[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_UpperCAmelCase = self._get_dummy_logits()[0]
_UpperCAmelCase = processor.decode(A , output_word_offsets=A)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(A , A))
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word')) , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset') , [1, 3, 5])
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = processor.batch_decode(A , output_word_offsets=A)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(A , A))
self.assertListEqual(
[' '.join(self.get_from_offsets(A , 'word')) for o in outputs['word_offsets']] , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset') , [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
import torch
_UpperCAmelCase = load_dataset('common_voice' , 'en' , split='train' , streaming=A)
_UpperCAmelCase = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00))
_UpperCAmelCase = iter(A)
_UpperCAmelCase = next(A)
_UpperCAmelCase = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
_UpperCAmelCase = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase = processor(sample['audio']['array'] , return_tensors='pt').input_values
with torch.no_grad():
_UpperCAmelCase = model(A).logits.cpu().numpy()
_UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=A)
_UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
_UpperCAmelCase = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(A , 'word')) , A)
self.assertEqual(' '.join(self.get_from_offsets(A , 'word')) , output.text)
# output times
_UpperCAmelCase = torch.tensor(self.get_from_offsets(A , 'start_time'))
_UpperCAmelCase = torch.tensor(self.get_from_offsets(A , 'end_time'))
# fmt: off
_UpperCAmelCase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9])
_UpperCAmelCase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4])
# fmt: on
self.assertTrue(torch.allclose(A , A , atol=0.0_1))
self.assertTrue(torch.allclose(A , A , atol=0.0_1))
| 639
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
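# The `knapsack` module imported above is not shown in this file. The sketch
# below is a hypothetical minimal implementation that satisfies these tests,
# assuming the signature knapsack(capacity, weights, values, counter):
def _reference_knapsack(capacity, weights, values, counter):
    # base case: no items left to consider, or no capacity remaining
    if counter == 0 or capacity == 0:
        return 0
    # the current item cannot fit, so skip it
    if weights[counter - 1] > capacity:
        return _reference_knapsack(capacity, weights, values, counter - 1)
    # otherwise take the better of including or excluding the current item
    return max(
        values[counter - 1]
        + _reference_knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        _reference_knapsack(capacity, weights, values, counter - 1),
    )
# e.g. _reference_knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220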
| 639
| 1
|
from cva import destroyAllWindows, imread, imshow, waitKey
def A ( _UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
# getting number of pixels in the image
_UpperCAmelCase , _UpperCAmelCase = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
_UpperCAmelCase = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
UpperCAmelCase__ = imread("image_data/lena.jpg", 1)
# convert to its negative
UpperCAmelCase__ = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
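# Minimal self-check of the inversion rule used above, without reading an
# image file (an illustrative sketch): for a uint8 BGR pixel [10, 20, 30],
# the negative is [245, 235, 225].
import numpy as np
pixel = np.array([10, 20, 30], dtype=np.uint8)
negative_pixel = np.array([255, 255, 255]) - pixel
assert (negative_pixel == np.array([245, 235, 225])).all()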
| 639
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
def __init__( self : Dict , A : Optional[Any]=10_00_00 , A : Any=40_96 , A : Dict=1_10_08 , A : Optional[Any]=32 , A : List[Any]=32 , A : List[Any]="silu" , A : Optional[Any]=20_48 , A : Dict=0.0_2 , A : Union[str, Any]=1E-6 , A : str=True , A : List[str]=0 , A : Tuple=1 , A : str=2 , A : Dict=False , A : Any=True , A : Any=0.1 , A : Optional[Any]=0.1 , A : int=True , A : Optional[int]=True , A : Optional[int]=None , **A : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , A)
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"got {self.rope_scaling}")
_UpperCAmelCase = self.rope_scaling.get('type' , A)
_UpperCAmelCase = self.rope_scaling.get('factor' , A)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 639
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
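# Example invocation (all paths below are placeholders):
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./t5/model.ckpt \
#   --config_file ./t5/config.json \
#   --pytorch_dump_path ./t5-pytorch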
| 639
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 639
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCAmelCase__ = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"RUCAIBox/mvp": 1024,
}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = MvpTokenizer
def __init__( self : Optional[int] , A : Optional[int]=None , A : Any=None , A : Tuple=None , A : Dict="replace" , A : int="<s>" , A : Tuple="</s>" , A : Any="</s>" , A : str="<s>" , A : Optional[int]="<unk>" , A : Dict="<pad>" , A : int="<mask>" , A : Any=False , A : Optional[int]=True , **A : int , ) -> List[Any]:
"""simple docstring"""
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , A) != add_prefix_space:
_UpperCAmelCase = getattr(A , pre_tok_state.pop('type'))
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**A)
_UpperCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCAmelCase = 'post_processor'
_UpperCAmelCase = getattr(self.backend_tokenizer , A , A)
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state['sep'])
if "cls" in state:
_UpperCAmelCase = tuple(state['cls'])
_UpperCAmelCase = False
if state.get('add_prefix_space' , A) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get('trim_offsets' , A) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(A , state.pop('type'))
_UpperCAmelCase = component_class(**A)
setattr(self.backend_tokenizer , A , A)
@property
def _lowerCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def _lowerCamelCase ( self : Dict , A : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else value
_UpperCAmelCase = value
def _lowerCamelCase ( self : List[Any] , *A : Dict , **A : Optional[int]) -> BatchEncoding:
"""simple docstring"""
_UpperCAmelCase = kwargs.get('is_split_into_words' , A)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*A , **A)
def _lowerCamelCase ( self : Union[str, Any] , *A : Any , **A : Dict) -> BatchEncoding:
"""simple docstring"""
_UpperCAmelCase = kwargs.get('is_split_into_words' , A)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.')
return super()._encode_plus(*A , **A)
def _lowerCamelCase ( self : Any , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_UpperCAmelCase = self._tokenizer.model.save(A , name=A)
return tuple(A)
def _lowerCamelCase ( self : Any , A : str , A : Optional[int]=None) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
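# Special-token layout produced above: a single sequence [5, 6] becomes
# [<s>, 5, 6, </s>]; a pair becomes [<s>, a, </s>, </s>, b, </s>], and the
# token type ids are all zeros in both cases.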
| 639
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
for word in [sum(ord(_UpperCAmelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
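# Worked example: "SKY" -> 19 + 11 + 25 = 55, which is the 10th triangular
# number (0.5 * 10 * 11), so "SKY" is counted as a triangle word.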
if __name__ == "__main__":
print(solution())
| 639
| 1
|
from __future__ import annotations
def A ( _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
_UpperCAmelCase = str(_UpperCAmelCase )
return len(_UpperCAmelCase ) == 9 and set(_UpperCAmelCase ) == set('123456789' )
def A ( ) -> int | None:
'''simple docstring'''
for base_num in range(9_999 , 4_999 , -1 ):
_UpperCAmelCase = 100_002 * base_num
if is_9_pandigital(_UpperCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
_UpperCAmelCase = 1_002_003 * base_num
if is_9_pandigital(_UpperCAmelCase ):
return candidate
return None
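# Worked example: 9_327 * 100_002 = 932_718_654, i.e. 9327 concatenated with
# 2 * 9327 = 18654, and 932718654 uses each digit from 1 to 9 exactly once.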
if __name__ == "__main__":
print(f"""{solution() = }""")
| 639
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
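# Worked example: for a = 25 (0b11001) and b = 32 (0b100000), both strings are
# zero-filled to width 6 and XOR-ed bitwise: 011001 ^ 100000 = 111001, so the
# function returns '0b111001'.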
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 1
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''input_values''', '''padding_mask''']
def __init__( self : Any , A : int = 1 , A : int = 2_40_00 , A : float = 0.0 , A : float = None , A : float = None , **A : Dict , ) -> Dict:
"""simple docstring"""
super().__init__(feature_size=A , sampling_rate=A , padding_value=A , **A)
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = overlap
@property
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
def __call__( self : Tuple , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : Optional[Union[bool, str, PaddingStrategy]] = None , A : Optional[bool] = False , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Optional[int] = None , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.')
elif padding is None:
# by default let's pad the inputs
_UpperCAmelCase = True
_UpperCAmelCase = bool(
isinstance(A , (list, tuple)) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list))))
if is_batched:
_UpperCAmelCase = [np.asarray(A , dtype=np.floataa).T for audio in raw_audio]
elif not is_batched and not isinstance(A , np.ndarray):
_UpperCAmelCase = np.asarray(A , dtype=np.floataa)
elif isinstance(A , np.ndarray) and raw_audio.dtype is np.dtype(np.floataa):
_UpperCAmelCase = raw_audio.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCAmelCase = [np.asarray(A).T]
# verify inputs are valid
for idx, example in enumerate(A):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}")
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels")
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels")
_UpperCAmelCase = None
_UpperCAmelCase = BatchFeature({'input_values': raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_UpperCAmelCase = min(array.shape[0] for array in raw_audio)
_UpperCAmelCase = int(np.floor(max_length / self.chunk_stride))
_UpperCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_UpperCAmelCase = max(array.shape[0] for array in raw_audio)
_UpperCAmelCase = int(np.ceil(max_length / self.chunk_stride))
_UpperCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_UpperCAmelCase = 'max_length'
else:
_UpperCAmelCase = input_values
# normal padding on batch
if padded_inputs is None:
_UpperCAmelCase = self.pad(
A , max_length=A , truncation=A , padding=A , return_attention_mask=A , )
if padding:
_UpperCAmelCase = padded_inputs.pop('attention_mask')
_UpperCAmelCase = []
for example in padded_inputs.pop('input_values'):
if self.feature_size == 1:
_UpperCAmelCase = example[..., None]
input_values.append(example.T)
_UpperCAmelCase = input_values
if return_tensors is not None:
_UpperCAmelCase = padded_inputs.convert_to_tensors(A)
return padded_inputs
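# Minimal usage sketch (the class name above is obfuscated, so `extractor`
# stands in for an instance). With chunk_length_s=1.0 and overlap=0.25 at a
# 24 kHz sampling rate, chunk_length is 24_000 samples and chunk_stride is
# 18_000, so padded batches are laid out on an 18_000-sample grid:
#   batch = extractor([np.zeros(30_000, dtype=np.float32)], sampling_rate=24_000, return_tensors='np')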
| 639
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
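# Example: "racecar" has character counts {r: 2, a: 2, c: 2, e: 1}; only one
# character occurs an odd number of times, so it can be rearranged into a
# palindrome.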
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 1
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = 50 # max width of layer names
UpperCAmelCase__ = 70 # max width of quantizer names
def A ( _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_UpperCAmelCase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_UpperCAmelCase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_UpperCAmelCase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_UpperCAmelCase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_UpperCAmelCase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_UpperCAmelCase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def A ( _UpperCAmelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
if args.calibrator == "max":
_UpperCAmelCase = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
_UpperCAmelCase = 'histogram'
elif args.calibrator == "mse":
_UpperCAmelCase = 'histogram'
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
_UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=_UpperCAmelCase )
_UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_UpperCAmelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : List[Any]=False ) -> Optional[int]:
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_UpperCAmelCase , ['embeddings'] , which='weight' , _disabled=_UpperCAmelCase )
if args.quant_disable:
set_quantizer_by_name(_UpperCAmelCase , [''] , _disabled=_UpperCAmelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_UpperCAmelCase , args.quant_disable_keyword , _disabled=_UpperCAmelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_UpperCAmelCase , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_UpperCAmelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_UpperCAmelCase , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_UpperCAmelCase )
if args.recalibrate_weights:
recalibrate_weights(_UpperCAmelCase )
if args.fuse_qkv:
fuse_qkv(_UpperCAmelCase , _UpperCAmelCase )
if args.clip_gelu:
clip_gelu(_UpperCAmelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
def fusea(_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ):
for mod in [qq, qk, qv]:
if not hasattr(_UpperCAmelCase , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
_UpperCAmelCase = qq._amax.detach().item()
_UpperCAmelCase = qk._amax.detach().item()
_UpperCAmelCase = qv._amax.detach().item()
_UpperCAmelCase = max(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
qq._amax.fill_(_UpperCAmelCase )
qk._amax.fill_(_UpperCAmelCase )
qv._amax.fill_(_UpperCAmelCase )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
_UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_UpperCAmelCase )
_UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def A ( _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_UpperCAmelCase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
_UpperCAmelCase = mod.weight.shape[0]
_UpperCAmelCase = mod._weight_quantizer._amax.detach()
_UpperCAmelCase = torch.ones(_UpperCAmelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def A ( _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_UpperCAmelCase , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
_UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_UpperCAmelCase , keepdims=_UpperCAmelCase ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
_UpperCAmelCase = amax
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=25 , _UpperCAmelCase : int=180 , _UpperCAmelCase : int=None ) -> Tuple:
'''simple docstring'''
if ignore is None:
_UpperCAmelCase = []
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = [ignore]
_UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(_UpperCAmelCase , 'weight' ):
continue
_UpperCAmelCase = max(_UpperCAmelCase , len(_UpperCAmelCase ) )
for name, mod in model.named_modules():
_UpperCAmelCase = getattr(_UpperCAmelCase , '_input_quantizer' , _UpperCAmelCase )
_UpperCAmelCase = getattr(_UpperCAmelCase , '_weight_quantizer' , _UpperCAmelCase )
if not hasattr(_UpperCAmelCase , 'weight' ):
continue
if type(_UpperCAmelCase ) in ignore:
continue
if [True for s in ignore if type(_UpperCAmelCase ) is str and s in name]:
continue
_UpperCAmelCase = F"Act:{input_q.extra_repr()}"
_UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
_UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(_UpperCAmelCase ) <= line_width:
logger.info(_UpperCAmelCase )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(_UpperCAmelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if quantizer_mod is not None:
assert hasattr(_UpperCAmelCase , _UpperCAmelCase )
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
logger.warning(F"{name} has no {quantizer}" )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]="both" , **_UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(_UpperCAmelCase , _UpperCAmelCase , '_input_quantizer' , _UpperCAmelCase , _UpperCAmelCase )
if which in ["weight", "both"]:
set_quantizer(_UpperCAmelCase , _UpperCAmelCase , '_weight_quantizer' , _UpperCAmelCase , _UpperCAmelCase )
logger.info(_UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : int , **_UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_UpperCAmelCase , '_input_quantizer' ) or hasattr(_UpperCAmelCase , '_weight_quantizer' ):
for n in names:
if re.search(_UpperCAmelCase , _UpperCAmelCase ):
set_quantizers(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info(_UpperCAmelCase )
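# Typical flow with these helpers (their names are obfuscated to `A` above):
# 1) register the argument group and parse flags such as
#    --calibrator percentile --percentile 99.99 --wprec 8 --aprec 8,
# 2) set the default input/weight quant descriptors,
# 3) enable calibration and run a few forward batches to collect statistics,
# 4) load the calibrated amax values and re-enable quantization.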
| 639
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
        return tokenizer(_UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , truncation=True , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing them
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
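    # Illustrative sketch of whole-word masking (hypothetical tokenization): if a
    # word is split into the pieces ["phil", "##am", "##mon"], the collator masks
    # all three pieces together instead of masking sub-pieces independently.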
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
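        # Perplexity is exp(average cross-entropy loss); e.g. an eval loss of
        # 2.0 gives a perplexity of e**2.0 ~= 7.39.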
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 639
| 1
|
class __lowerCAmelCase :
def __init__( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = {}
def _lowerCamelCase ( self : Union[str, Any] , A : int) -> Dict:
"""simple docstring"""
        if A not in self.adjacency:
            self.adjacency[A] = {}
            self.num_vertices += 1
def _lowerCamelCase ( self : Optional[int] , A : Dict , A : int , A : int) -> Tuple:
"""simple docstring"""
self.add_vertex(A)
self.add_vertex(A)
if head == tail:
return
_UpperCAmelCase = weight
_UpperCAmelCase = weight
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda A: A[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
def __str__( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
string += F"{head} -> {tail} == {weight}\n"
return string.rstrip('\n')
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def _lowerCamelCase ( A : Optional[int]=None , A : str=None) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = Graph()
if vertices is None:
_UpperCAmelCase = []
if edges is None:
_UpperCAmelCase = []
for vertex in vertices:
g.add_vertex(A)
for edge in edges:
g.add_edge(*A)
return g
class __lowerCAmelCase :
def __init__( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
def __len__( self : Any) -> int:
"""simple docstring"""
return len(self.parent)
def _lowerCamelCase ( self : List[Any] , A : int) -> List[str]:
"""simple docstring"""
        if A in self.parent:
            return self.find(A)
        self.parent[A] = A
        self.rank[A] = 0
        return A
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> int:
"""simple docstring"""
        if A not in self.parent:
            return self.make_set(A)
        if A != self.parent[A]:
            self.parent[A] = self.find(self.parent[A])
        return self.parent[A]
def _lowerCamelCase ( self : Union[str, Any] , A : Tuple , A : Dict) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.find(A)
_UpperCAmelCase = self.find(A)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_UpperCAmelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
_UpperCAmelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_UpperCAmelCase = roota
return roota
return None
@staticmethod
def _lowerCamelCase ( A : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = graph.num_vertices
_UpperCAmelCase = Graph.UnionFind()
_UpperCAmelCase = []
while num_components > 1:
_UpperCAmelCase = {}
for vertex in graph.get_vertices():
_UpperCAmelCase = -1
_UpperCAmelCase = graph.get_edges()
for edge in edges:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = edge
edges.remove((tail, head, weight))
for edge in edges:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = edge
_UpperCAmelCase = union_find.find(A)
_UpperCAmelCase = union_find.find(A)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = cheap_edge[vertex]
if union_find.find(A) != union_find.find(A):
union_find.union(A , A)
mst_edges.append(cheap_edge[vertex])
_UpperCAmelCase = num_components - 1
_UpperCAmelCase = Graph.build(edges=A)
return mst
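# Usage sketch (illustrative; assumes the obfuscated methods above keep their
# original names `build` and `boruvka` from the source algorithm):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   mst = Graph.boruvka(g)  # keeps the two cheapest edges (1, 2, 1) and (2, 3, 2)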
| 639
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
        _UpperCAmelCase = bool(self.vocab_file)
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
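        # XLNet places the classification token at the END of the sequence:
        #   single sequence:    X <sep> <cls>
        #   pair of sequences:  A <sep> B <sep> <cls>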
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
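        # e.g. for a pair (A, B): A + <sep> maps to segment 0, B + <sep> to
        # segment 1, and the final <cls> token gets the special segment id 2.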
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
| 639
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A )
class __lowerCAmelCase ( A ):
UpperCamelCase = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''audio''': Audio()} )
UpperCamelCase = Features({'''transcription''': Value('''string''' )} )
UpperCamelCase = "audio"
UpperCamelCase = "transcription"
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any]) -> List[str]:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , A):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
_UpperCAmelCase = copy.deepcopy(self)
_UpperCAmelCase = self.input_schema.copy()
_UpperCAmelCase = features[self.audio_column]
_UpperCAmelCase = input_schema
return task_template
@property
def _lowerCamelCase ( self : str) -> Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = FunnelTokenizer
UpperCamelCase = FunnelTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def _lowerCamelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def _lowerCamelCase ( self : Tuple , **A : Any) -> Dict:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Any , **A : Optional[int]) -> List[Any]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Tuple , A : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'UNwant\u00E9d,running'
_UpperCAmelCase = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file)
_UpperCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running')
self.assertListEqual(A , ['un', '##want', '##ed', ',', 'runn', '##ing'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , [7, 4, 5, 10, 8, 9])
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
_UpperCAmelCase = tokenizer('UNwant\u00E9d,running')
_UpperCAmelCase = len(inputs['input_ids']) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len)
_UpperCAmelCase = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running')
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len)
| 639
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
    _UpperCAmelCase = [len(line) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 1
|
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
def __init__( self : Tuple , A : Any=None , **A : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.' , A , )
super().__init__(args=A , **A)
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
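    # Example of the renaming performed below: the checkpoint key
    #   "encoders.0.level_blocks.1.model.2.3.weight"
    # matches the first encoder pattern with block_index = 2 * 2 + 3 = 7 and becomes
    #   "encoders.0.level_blocks.1.downsample_block.7.weight"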
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
        # The floating-point scores are so close that we hit floating-point error, so the order is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __lowerCAmelCase ( A ):
def __init__( self : str , A : Callable , A : Optional[Features] = None , A : str = None , A : bool = False , A : bool = False , A : Optional[dict] = None , A : Optional[int] = None , **A : Dict , ) -> List[Any]:
"""simple docstring"""
super().__init__(
features=A , cache_dir=A , keep_in_memory=A , streaming=A , num_proc=A , **A , )
_UpperCAmelCase = Generator(
cache_dir=A , features=A , generator=A , gen_kwargs=A , **A , )
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
if self.streaming:
_UpperCAmelCase = self.builder.as_streaming_dataset(split='train')
# Build regular (map-style) dataset
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=A , download_mode=A , verification_mode=A , base_path=A , num_proc=self.num_proc , )
_UpperCAmelCase = self.builder.as_dataset(
split='train' , verification_mode=A , in_memory=self.keep_in_memory)
return dataset
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
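# e.g. "racecar" -> counts {'r': 2, 'a': 2, 'c': 2, 'e': 1}: only one odd count,
# so it can be rearranged into a palindrome; "hello" has three odd counts
# (h, e, o), so it cannot.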
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
    _UpperCAmelCase = filter(lambda _UpperCAmelCase : _UpperCAmelCase.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
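# Illustrative example with pad_token_id = 0:
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
# Columns 2 and 3 contain only padding, so the function above keeps columns
# 0-1 and returns [[5, 6], [7, 0]].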
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
        return [len(x) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
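# e.g. flattening [[1, 2], [3]] yields [1, 2, 3].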
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
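# Illustrative example (added, not in the original file) of the pipeline above,
# which applies lowercase -> strip punctuation -> drop articles -> collapse spaces:
#
#   normalize_answer("The  Quick, Brown fox!")
#   # returns "quick brown fox"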
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = Counter(_UpperCAmelCase ) & Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
| 639
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''vivit'''
def __init__( self : Dict , A : Union[str, Any]=2_24 , A : Tuple=32 , A : List[str]=[2, 16, 16] , A : Dict=3 , A : Dict=7_68 , A : Union[str, Any]=12 , A : Dict=12 , A : Union[str, Any]=30_72 , A : Dict="gelu_fast" , A : str=0.0 , A : List[str]=0.0 , A : Optional[int]=0.0_2 , A : Tuple=1E-06 , A : Optional[int]=True , **A : Tuple , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = tubelet_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
super().__init__(**A)
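# Illustrative usage (added; the obfuscated class name stands in for the ViViT
# config class, and the keyword names come from the __init__ signature above):
#
#   config = __lowerCAmelCase(num_frames=32, tubelet_size=[2, 16, 16])
#   config.num_frames   # -> 32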
| 639
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 639
| 1
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __lowerCAmelCase :
def __init__( self : int , A : Optional[Any]=2 , A : List[str]=3 , A : Optional[int]=64 , A : Union[str, Any]=None) -> Dict:
"""simple docstring"""
_UpperCAmelCase = np.random.default_rng(A)
_UpperCAmelCase = length
_UpperCAmelCase = rng.normal(size=(length,)).astype(np.floataa)
_UpperCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.floataa)
def __len__( self : Tuple) -> List[Any]:
"""simple docstring"""
return self.length
def __getitem__( self : Union[str, Any] , A : Tuple) -> List[Any]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __lowerCAmelCase ( torch.nn.Module ):
def __init__( self : int , A : List[str]=0 , A : Any=0 , A : Optional[int]=False) -> Tuple:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3]).float())
_UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3]).float())
_UpperCAmelCase = True
def _lowerCamelCase ( self : int , A : Dict=None) -> int:
"""simple docstring"""
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
_UpperCAmelCase = False
return x * self.a[0] + self.b[0]
class __lowerCAmelCase ( torch.nn.Module ):
def __init__( self : Any , A : Optional[Any]=0 , A : Any=0 , A : List[Any]=False) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = torch.nn.Parameter(torch.tensor(A).float())
_UpperCAmelCase = torch.nn.Parameter(torch.tensor(A).float())
_UpperCAmelCase = True
def _lowerCamelCase ( self : Optional[Any] , A : Optional[int]=None) -> str:
"""simple docstring"""
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
_UpperCAmelCase = False
return x * self.a + self.b
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : int = 16 ) -> Optional[int]:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
_UpperCAmelCase = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
_UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase )
_UpperCAmelCase = datasets['train'].unique('label' )
_UpperCAmelCase = {v: i for i, v in enumerate(_UpperCAmelCase )}
def tokenize_function(_UpperCAmelCase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
if "label" in examples:
_UpperCAmelCase = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(_UpperCAmelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=2 )
_UpperCAmelCase = DataLoader(tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 639
|
def A ( _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_UpperCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1]
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
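# Worked trace (added for illustration; the paired `_UpperCAmelCase` assignment
# above performs the adjacent swap):
#
#   gnome_sort([3, 1, 2])
#   i=1: 3 > 1 -> swap -> [1, 3, 2], step back
#   i=2: 3 > 2 -> swap -> [1, 2, 3], step back
#   result: [1, 2, 3]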
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 639
| 1
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
_UpperCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
_UpperCAmelCase = transform(_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
return image
def A ( _UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
if "visual_encoder" in key:
_UpperCAmelCase = re.sub('visual_encoder*' , 'vision_model.encoder' , _UpperCAmelCase )
if "blocks" in key:
_UpperCAmelCase = re.sub(R'blocks' , 'layers' , _UpperCAmelCase )
if "attn" in key:
_UpperCAmelCase = re.sub(R'attn' , 'self_attn' , _UpperCAmelCase )
if "norm1" in key:
_UpperCAmelCase = re.sub(R'norm1' , 'layer_norm1' , _UpperCAmelCase )
if "norm2" in key:
_UpperCAmelCase = re.sub(R'norm2' , 'layer_norm2' , _UpperCAmelCase )
if "encoder.norm" in key:
_UpperCAmelCase = re.sub(R'encoder.norm' , 'post_layernorm' , _UpperCAmelCase )
if "encoder.patch_embed.proj" in key:
_UpperCAmelCase = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , _UpperCAmelCase )
if "encoder.pos_embed" in key:
_UpperCAmelCase = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , _UpperCAmelCase )
if "encoder.cls_token" in key:
_UpperCAmelCase = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , _UpperCAmelCase )
if "self_attn" in key:
_UpperCAmelCase = re.sub(R'self_attn.proj' , 'self_attn.projection' , _UpperCAmelCase )
return key
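# Illustrative example (added, hypothetical key): the substitutions above compose,
# e.g.
#
#   rename_key('visual_encoder.blocks.0.attn.qkv.weight')
#   # -> 'vision_model.encoder.layers.0.self_attn.qkv.weight'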
@torch.no_grad()
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[str]=None ) -> Tuple:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = BlipConfig.from_pretrained(_UpperCAmelCase )
else:
_UpperCAmelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
_UpperCAmelCase = BlipForConditionalGeneration(_UpperCAmelCase ).eval()
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
_UpperCAmelCase = blip_decoder(pretrained=_UpperCAmelCase , image_size=384 , vit='base' )
_UpperCAmelCase = pt_model.eval()
_UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(_UpperCAmelCase )
_UpperCAmelCase = rename_key(_UpperCAmelCase )
_UpperCAmelCase = value
hf_model.load_state_dict(_UpperCAmelCase )
_UpperCAmelCase = 384
_UpperCAmelCase = load_demo_image(image_size=_UpperCAmelCase , device='cpu' )
_UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_UpperCAmelCase = tokenizer(['a picture of'] ).input_ids
_UpperCAmelCase = hf_model.generate(_UpperCAmelCase , _UpperCAmelCase )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
_UpperCAmelCase = hf_model.generate(_UpperCAmelCase )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_UpperCAmelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
_UpperCAmelCase = blip_vqa(pretrained=_UpperCAmelCase , image_size=_UpperCAmelCase , vit='base' )
vqa_model.eval()
_UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(_UpperCAmelCase )
_UpperCAmelCase = rename_key(_UpperCAmelCase )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForQuestionAnswering(_UpperCAmelCase )
hf_vqa_model.load_state_dict(_UpperCAmelCase )
_UpperCAmelCase = ['How many dogs are in this image?']
_UpperCAmelCase = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids
_UpperCAmelCase = hf_vqa_model.generate(_UpperCAmelCase , _UpperCAmelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
_UpperCAmelCase = blip_itm(pretrained=_UpperCAmelCase , image_size=_UpperCAmelCase , vit='base' )
itm_model.eval()
_UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(_UpperCAmelCase )
_UpperCAmelCase = rename_key(_UpperCAmelCase )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForImageTextRetrieval(_UpperCAmelCase )
_UpperCAmelCase = ['A picture of a woman with a dog sitting in a beach']
_UpperCAmelCase = tokenizer(
_UpperCAmelCase , return_tensors='pt' , padding='max_length' , truncation=_UpperCAmelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_UpperCAmelCase )
hf_itm_model.eval()
_UpperCAmelCase = hf_itm_model(_UpperCAmelCase , _UpperCAmelCase , use_itm_head=_UpperCAmelCase )
_UpperCAmelCase = hf_itm_model(_UpperCAmelCase , _UpperCAmelCase , use_itm_head=_UpperCAmelCase )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCAmelCase__ = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 639
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
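# Note (added): encode_line returns the tokenizer's BatchEncoding for a single
# line, so 'input_ids' and 'attention_mask' have shape (1, max_length) when
# padding to max length is enabled; __getitem__ below squeezes that batch dim.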
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
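# Illustrative example (added; tensors shown as plain lists for readability):
# with pad_token_id = 0,
#
#   input_ids = [[5, 6, 0],
#                [7, 0, 0]]
#   trim_batch(input_ids, 0)  # -> [[5, 6], [7, 0]] -- the all-pad column is cut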
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(x) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = Counter(_UpperCAmelCase ) & Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
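# Worked example (added): for inputs "a cat sat" and "the cat", normalization
# keeps ["cat", "sat"] and ["cat"]; num_same = 1, precision = 1/2, recall = 1/1,
# so F1 = 2 * 0.5 * 1.0 / 1.5 = 2/3.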
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
| 639
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase__ = "Create a default config file for Accelerate with only a few flags set."
def A ( _UpperCAmelCase : Optional[int]="no" , _UpperCAmelCase : str = default_json_config_file , _UpperCAmelCase : bool = False ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = Path(_UpperCAmelCase )
path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
if path.exists():
print(
F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
_UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
_UpperCAmelCase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_UpperCAmelCase = torch.cuda.device_count()
_UpperCAmelCase = num_gpus
_UpperCAmelCase = False
if num_gpus > 1:
_UpperCAmelCase = 'MULTI_GPU'
else:
_UpperCAmelCase = 'NO'
elif is_xpu_available() and use_xpu:
_UpperCAmelCase = torch.xpu.device_count()
_UpperCAmelCase = num_xpus
_UpperCAmelCase = False
if num_xpus > 1:
_UpperCAmelCase = 'MULTI_XPU'
else:
_UpperCAmelCase = 'NO'
elif is_npu_available():
_UpperCAmelCase = torch.npu.device_count()
_UpperCAmelCase = num_npus
_UpperCAmelCase = False
if num_npus > 1:
_UpperCAmelCase = 'MULTI_NPU'
else:
_UpperCAmelCase = 'NO'
else:
_UpperCAmelCase = 0
_UpperCAmelCase = True
_UpperCAmelCase = 1
_UpperCAmelCase = 'NO'
_UpperCAmelCase = ClusterConfig(**_UpperCAmelCase )
config.to_json_file(_UpperCAmelCase )
return path
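# Illustrative result (added, values hypothetical; assumes the obfuscated
# assignments above map to num_processes / use_cpu / distributed_type): on a
# 2-GPU machine the JSON written above would resemble
#
#   {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "no",
#    "num_processes": 2, "use_cpu": false, "distributed_type": "MULTI_GPU"}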
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = parser.add_parser('default' , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_UpperCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_UpperCAmelCase )
return parser
def A ( _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"accelerate configuration saved at {config_file}" )
| 639
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while second != 0:
_UpperCAmelCase = first & second
first ^= second
_UpperCAmelCase = c << 1
return first
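# Worked trace (added; reading the obfuscated assignments as c = first & second
# and second = c << 1):
#
#   add(5, 3): 101 & 011 = 001; 101 ^ 011 = 110; carry 010
#              110 & 010 = 010; 110 ^ 010 = 100; carry 100
#              100 & 100 = 100; 100 ^ 100 = 000; carry 1000
#              000 & 1000 = 0;  000 ^ 1000 = 1000; carry 0 -> returns 8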
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(A , 'embed_dim'))
self.parent.assertTrue(hasattr(A , 'num_heads'))
class __lowerCAmelCase :
def __init__( self : Optional[int] , A : int , A : int=13 , A : Optional[Any]=64 , A : Union[str, Any]=3 , A : Tuple=[16, 48, 96] , A : int=[1, 3, 6] , A : Any=[1, 2, 10] , A : Any=[7, 3, 3] , A : List[str]=[4, 2, 2] , A : Dict=[2, 1, 1] , A : str=[2, 2, 2] , A : Union[str, Any]=[False, False, True] , A : int=[0.0, 0.0, 0.0] , A : int=0.0_2 , A : Optional[int]=1E-12 , A : Optional[int]=True , A : Tuple=True , A : Dict=2 , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : List[Any] , A : str , A : List[str] , A : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = TFCvtModel(config=A)
_UpperCAmelCase = model(A , training=A)
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth)):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def _lowerCamelCase ( self : Tuple , A : Optional[int] , A : int , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFCvtForImageClassification(A)
_UpperCAmelCase = model(A , labels=A , training=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFCvtModelTester(self)
_UpperCAmelCase = TFCvtConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions')
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not use inputs_embeds')
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not support input and output embeddings')
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8')
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = tf.keras.mixed_precision.Policy('mixed_float16')
tf.keras.mixed_precision.set_global_policy(A)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32')
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
def check_hidden_states_output(A : int , A : Optional[int] , A : List[str]):
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth)
self.assertEqual(len(A) , A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
@slow
def _lowerCamelCase ( self : int) -> int:
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFCvtModel.from_pretrained(A)
self.assertIsNotNone(A)
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='tf')
# forward pass
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A , atol=1E-4))
| 639
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( _UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ) -> complex:
'''simple docstring'''
_UpperCAmelCase = symbols(_UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
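# Added note: each pass applies the multiplicity-aware Newton step
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# and stops once successive guesses differ by less than `precision`.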
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find the fourth root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 1
|
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=A ):
UpperCamelCase = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *A : str , **A : int) -> Tuple:
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Dict , *A : Tuple , **A : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : List[str] , *A : Dict , **A : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
class __lowerCAmelCase ( metaclass=A ):
UpperCamelCase = ['''flax''', '''transformers''']
def __init__( self : Tuple , *A : Tuple , **A : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *A : Dict , **A : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Dict , *A : int , **A : str) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
class __lowerCAmelCase ( metaclass=A ):
UpperCamelCase = ['''flax''', '''transformers''']
def __init__( self : Dict , *A : int , **A : List[str]) -> str:
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Dict , *A : Any , **A : Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *A : str , **A : Union[str, Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
class __lowerCAmelCase ( metaclass=A ):
UpperCamelCase = ['''flax''', '''transformers''']
def __init__( self : Dict , *A : Tuple , **A : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Dict , *A : Tuple , **A : Optional[int]) -> str:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *A : Tuple , **A : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'])
| 639
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 639
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def A ( _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
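    # Collect every tensor's shape and check that all of them equal the first one.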
_UpperCAmelCase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __lowerCAmelCase ( A , A , A , unittest.TestCase ):
UpperCamelCase = StableDiffusionLatentUpscalePipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase = frozenset([] )
UpperCamelCase = True
@property
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 1
_UpperCAmelCase = 4
_UpperCAmelCase = (16, 16)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(A)
return image
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=A , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=A , only_cross_attention=A , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
_UpperCAmelCase = EulerDiscreteScheduler(prediction_type='sample')
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='quick_gelu' , projection_dim=5_12 , )
_UpperCAmelCase = CLIPTextModel(A)
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_UpperCAmelCase = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def _lowerCamelCase ( self : List[Any] , A : Union[str, Any] , A : List[Any]=0) -> List[str]:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**A)
pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_dummy_inputs(A)
_UpperCAmelCase = pipe(**A).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3))
_UpperCAmelCase = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5])
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(A , 1E-3)
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3)
def _lowerCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3)
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3)
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**A)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=A)
pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_dummy_inputs(A)
_UpperCAmelCase = 2
_UpperCAmelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not compatible with this pipeline's Karras sigma schedule
continue
_UpperCAmelCase = getattr(A , scheduler_enum.name)
_UpperCAmelCase = scheduler_cls.from_config(pipe.scheduler.config)
_UpperCAmelCase = pipe(**A)[0]
outputs.append(A)
assert check_same_shape(A)
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = torch.manual_seed(33)
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa)
pipe.to('cuda')
_UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
_UpperCAmelCase = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
_UpperCAmelCase = pipe(A , generator=A , output_type='latent').images
_UpperCAmelCase = upscaler(
prompt=A , image=A , num_inference_steps=20 , guidance_scale=0 , generator=A , output_type='np' , ).images[0]
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5E-2
def _lowerCamelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = torch.manual_seed(33)
_UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
_UpperCAmelCase = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
_UpperCAmelCase = upscaler(
prompt=A , image=A , num_inference_steps=20 , guidance_scale=0 , generator=A , output_type='np' , ).images[0]
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
assert np.abs((expected_image - image).max()) < 5E-2
| 639
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
| 639
| 1
|
import numpy as np
def A ( _UpperCAmelCase : np.array ) -> np.array:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( _UpperCAmelCase : np.array ) -> np.array:
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
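# Note: x * sigmoid(1.702 * x) is the sigmoid approximation of GELU; e.g. for x = 1.0
# it gives roughly 0.846, close to the exact GELU value of roughly 0.841.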
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the Aer simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
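# With X applied to both qubits, the measurement is deterministic: all 1000 shots
# are expected to land in the '11' state, i.e. counts of roughly {'11': 1000}.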
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 1
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : int , A : float) -> float:
"""simple docstring"""
return 0.0
def A ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int ) -> tuple[int | float, int | float]:
'''simple docstring'''
_UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A ( _UpperCAmelCase : FilterType , _UpperCAmelCase : int ) -> None:
'''simple docstring'''
_UpperCAmelCase = 512
_UpperCAmelCase = [1] + [0] * (size - 1)
    _UpperCAmelCase = [filter_type.process(item ) for item in inputs]
_UpperCAmelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase = np.abs(np.fft.fft(_UpperCAmelCase ) )
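    # Convert the magnitude spectrum to decibels: gain_db = 20 * log10(|H(f)|)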
_UpperCAmelCase = 20 * np.logaa(_UpperCAmelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
_UpperCAmelCase = get_bounds(_UpperCAmelCase , _UpperCAmelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(_UpperCAmelCase )
plt.show()
def A ( _UpperCAmelCase : FilterType , _UpperCAmelCase : int ) -> None:
'''simple docstring'''
_UpperCAmelCase = 512
_UpperCAmelCase = [1] + [0] * (size - 1)
    _UpperCAmelCase = [filter_type.process(item ) for item in inputs]
_UpperCAmelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
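    # The phase response is the argument (angle) of the complex frequency response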
_UpperCAmelCase = np.angle(np.fft.fft(_UpperCAmelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(_UpperCAmelCase , -2 * pi ) )
plt.show()
| 639
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 639
| 1
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell Python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple=0 ) -> Optional[Any]:
'''simple docstring'''
# Format the message.
if name is None:
_UpperCAmelCase = None
else:
_UpperCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
_UpperCAmelCase = fmt.format(_UpperCAmelCase )
# Print and recurse (if needed).
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if msg is not None:
print(_UpperCAmelCase )
for k in val.keys():
recursive_print(_UpperCAmelCase , val[k] , spaces + 2 )
elif isinstance(_UpperCAmelCase , torch.Tensor ):
print(_UpperCAmelCase , ':' , val.size() )
else:
print(_UpperCAmelCase , ':' , _UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
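    # Worked example (assuming num_heads=2, hidden_size=4, num_splits=3): a
    # version-2.0 tensor of shape [2 * 3 * 4, D] is viewed as (2, 3, 4, D),
    # transposed to (3, 2, 4, D), then flattened back to [3 * 2 * 4, D].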
_UpperCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_UpperCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
_UpperCAmelCase = param.view(*_UpperCAmelCase )
_UpperCAmelCase = param.transpose(0 , 2 )
_UpperCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_UpperCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
_UpperCAmelCase = param.view(*_UpperCAmelCase )
_UpperCAmelCase = param.transpose(0 , 1 ).contiguous()
_UpperCAmelCase = param.view(*_UpperCAmelCase )
return param
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
# The converted output model.
_UpperCAmelCase = {}
# old versions did not store training args
_UpperCAmelCase = input_state_dict.get('args' , _UpperCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_UpperCAmelCase = ds_args.padded_vocab_size
_UpperCAmelCase = ds_args.max_position_embeddings
_UpperCAmelCase = ds_args.hidden_size
_UpperCAmelCase = ds_args.num_layers
_UpperCAmelCase = ds_args.num_attention_heads
_UpperCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_UpperCAmelCase = config.n_head
# The hidden_size per head.
_UpperCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_UpperCAmelCase = input_state_dict['checkpoint_version']
else:
_UpperCAmelCase = 0.0
# The model.
_UpperCAmelCase = input_state_dict['model']
# The language model.
_UpperCAmelCase = model['language_model']
# The embeddings.
_UpperCAmelCase = lm['embedding']
# The word embeddings.
_UpperCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
_UpperCAmelCase = word_embeddings[: config.vocab_size, :]
_UpperCAmelCase = word_embeddings
# The position embeddings.
_UpperCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_UpperCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
_UpperCAmelCase = pos_embeddings
# The transformer.
_UpperCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
_UpperCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
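    # e.g. 'layers.0.attention.dense.weight' yields groups ('0', 'attention.dense', 'weight')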
# The simple map of names for "automated" rules.
_UpperCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_UpperCAmelCase = layer_re.match(_UpperCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_UpperCAmelCase = int(m.group(1 ) )
# The name of the operation.
_UpperCAmelCase = m.group(2 )
# Is it a weight or a bias?
_UpperCAmelCase = m.group(3 )
# The name of the layer.
_UpperCAmelCase = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
_UpperCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
_UpperCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_UpperCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
_UpperCAmelCase = torch.tensor(-1E4 , dtype=torch.floataa )
_UpperCAmelCase = masked_bias
_UpperCAmelCase = fix_query_key_value_ordering(_UpperCAmelCase , _UpperCAmelCase , 3 , _UpperCAmelCase , _UpperCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_UpperCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
_UpperCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_UpperCAmelCase = fix_query_key_value_ordering(_UpperCAmelCase , _UpperCAmelCase , 3 , _UpperCAmelCase , _UpperCAmelCase )
# Store. No change of shape.
_UpperCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_UpperCAmelCase = megatron_to_transformers[op_name]
_UpperCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_UpperCAmelCase = megatron_to_transformers[op_name]
_UpperCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_UpperCAmelCase = transformer['final_layernorm.weight']
_UpperCAmelCase = transformer['final_layernorm.bias']
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
_UpperCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def A ( ) -> Dict:
'''simple docstring'''
# Create the argument parser.
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=_UpperCAmelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=_UpperCAmelCase , help='An optional config json file describing the pre-trained model.' , )
_UpperCAmelCase = parser.parse_args()
# Extract the basename.
_UpperCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
else:
_UpperCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
_UpperCAmelCase = input_state_dict.get('args' , _UpperCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_UpperCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
_UpperCAmelCase = 'gelu_new'
else:
_UpperCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
_UpperCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
_UpperCAmelCase = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_UpperCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_UpperCAmelCase , summary_activation=_UpperCAmelCase , summary_proj_to_labels=_UpperCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=_UpperCAmelCase , use_cache=_UpperCAmelCase , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
_UpperCAmelCase = GPTaConfig.from_json_file(args.config_file )
_UpperCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
_UpperCAmelCase = convert_megatron_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_UpperCAmelCase , _UpperCAmelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_UpperCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_UpperCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
_UpperCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
_UpperCAmelCase = 'gpt2'
_UpperCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = type(_UpperCAmelCase ).__name__
_UpperCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(_UpperCAmelCase )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_UpperCAmelCase )
# Store the state_dict to file.
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'pytorch_model.bin' )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 639
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 1
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
_UpperCAmelCase = 'xvjiarui/stable-diffusion-2-inpainting'
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(A , safety_checker=A)
_UpperCAmelCase = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCAmelCase = jax.random.PRNGKey(0)
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = num_samples * [init_image]
_UpperCAmelCase = num_samples * [mask_image]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pipeline.prepare_inputs(A , A , A)
# shard inputs and rng
_UpperCAmelCase = replicate(A)
_UpperCAmelCase = jax.random.split(A , jax.device_count())
_UpperCAmelCase = shard(A)
_UpperCAmelCase = shard(A)
_UpperCAmelCase = shard(A)
_UpperCAmelCase = pipeline(
A , A , A , A , A , A , jit=A)
_UpperCAmelCase = output.images.reshape(A , 5_12 , 5_12 , 3)
_UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
_UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten()))
_UpperCAmelCase = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4])
print(F"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 639
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
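# A triangular number is T(n) = n * (n + 1) / 2. A word is counted when the sum of its
# letter positions (A=1 ... Z=26) is triangular, e.g. SKY -> 19 + 11 + 25 = 55 = T(10).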
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
| 639
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
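# Example: for a=25 (0b11001) and b=32 (0b100000), both strings are zero-filled to six
# bits and the per-character XOR yields '0b111001'.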
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 1
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the Aer simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
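# The Counter one-liner above and the explicit loop below rely on the same fact: a string
# can be rearranged into a palindrome iff at most one character occurs an odd number of times.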
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=1 ) -> int:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
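# e.g. with n_shave_prefix_segments=1, 'input_blocks.0.0.weight' becomes '0.0.weight'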
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=0 ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = []
for old_item in old_list:
_UpperCAmelCase = old_item.replace('in_layers.0' , 'norm1' )
_UpperCAmelCase = new_item.replace('in_layers.2' , 'conv1' )
_UpperCAmelCase = new_item.replace('out_layers.0' , 'norm2' )
_UpperCAmelCase = new_item.replace('out_layers.3' , 'conv2' )
_UpperCAmelCase = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_UpperCAmelCase = new_item.replace('skip_connection' , 'conv_shortcut' )
_UpperCAmelCase = shave_segments(_UpperCAmelCase , n_shave_prefix_segments=_UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str]=0 ) -> int:
'''simple docstring'''
_UpperCAmelCase = []
for old_item in old_list:
_UpperCAmelCase = old_item
_UpperCAmelCase = new_item.replace('norm.weight' , 'group_norm.weight' )
_UpperCAmelCase = new_item.replace('norm.bias' , 'group_norm.bias' )
_UpperCAmelCase = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_UpperCAmelCase = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_UpperCAmelCase = shave_segments(_UpperCAmelCase , n_shave_prefix_segments=_UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Optional[int]=None ) -> Tuple:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_UpperCAmelCase = old_checkpoint[path]
_UpperCAmelCase = old_tensor.shape[0] // 3
_UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_UpperCAmelCase = old_tensor.shape[0] // config['num_head_channels'] // 3
_UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
_UpperCAmelCase = query.reshape(_UpperCAmelCase )
_UpperCAmelCase = key.reshape(_UpperCAmelCase )
_UpperCAmelCase = value.reshape(_UpperCAmelCase )
for path in paths:
_UpperCAmelCase = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_UpperCAmelCase = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_UpperCAmelCase = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_UpperCAmelCase = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_UpperCAmelCase = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_UpperCAmelCase = old_checkpoint[path['old']][:, :, 0]
else:
_UpperCAmelCase = old_checkpoint[path['old']]
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
'''simple docstring'''
_UpperCAmelCase = {}
_UpperCAmelCase = checkpoint['time_embed.0.weight']
_UpperCAmelCase = checkpoint['time_embed.0.bias']
_UpperCAmelCase = checkpoint['time_embed.2.weight']
_UpperCAmelCase = checkpoint['time_embed.2.bias']
_UpperCAmelCase = checkpoint['input_blocks.0.0.weight']
_UpperCAmelCase = checkpoint['input_blocks.0.0.bias']
_UpperCAmelCase = checkpoint['out.0.weight']
_UpperCAmelCase = checkpoint['out.0.bias']
_UpperCAmelCase = checkpoint['out.2.weight']
_UpperCAmelCase = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_UpperCAmelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_UpperCAmelCase = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
_UpperCAmelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_UpperCAmelCase = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
_UpperCAmelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_UpperCAmelCase = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(_UpperCAmelCase )
}
for i in range(1 , _UpperCAmelCase ):
_UpperCAmelCase = (i - 1) // (config['num_res_blocks'] + 1)
_UpperCAmelCase = (i - 1) % (config['num_res_blocks'] + 1)
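        # e.g. with num_res_blocks=2, input block i=4 maps to block_id=1, layer_in_block_id=0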
_UpperCAmelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_UpperCAmelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_UpperCAmelCase = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_UpperCAmelCase = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
_UpperCAmelCase = {'old': F"input_blocks.{i}.0", 'new': F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_UpperCAmelCase = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=_UpperCAmelCase )
if len(_UpperCAmelCase ):
_UpperCAmelCase = renew_attention_paths(_UpperCAmelCase )
_UpperCAmelCase = {
'old': F"input_blocks.{i}.1",
'new': F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_UpperCAmelCase = {
F"input_blocks.{i}.1.qkv.bias": {
'key': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
'key': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=_UpperCAmelCase , config=_UpperCAmelCase , )
_UpperCAmelCase = middle_blocks[0]
_UpperCAmelCase = middle_blocks[1]
_UpperCAmelCase = middle_blocks[2]
_UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , config=_UpperCAmelCase )
_UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , config=_UpperCAmelCase )
_UpperCAmelCase = renew_attention_paths(_UpperCAmelCase )
_UpperCAmelCase = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , attention_paths_to_split=_UpperCAmelCase , config=_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
_UpperCAmelCase = i // (config['num_res_blocks'] + 1)
_UpperCAmelCase = i % (config['num_res_blocks'] + 1)
_UpperCAmelCase = [shave_segments(_UpperCAmelCase , 2 ) for name in output_blocks[i]]
_UpperCAmelCase = {}
for layer in output_block_layers:
_UpperCAmelCase , _UpperCAmelCase = layer.split('.' )[0], shave_segments(_UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_UpperCAmelCase )
else:
_UpperCAmelCase = [layer_name]
if len(_UpperCAmelCase ) > 1:
_UpperCAmelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_UpperCAmelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
_UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
_UpperCAmelCase = {'old': F"output_blocks.{i}.0", 'new': F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , config=_UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_UpperCAmelCase = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_UpperCAmelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_UpperCAmelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(_UpperCAmelCase ) == 2:
_UpperCAmelCase = []
if len(_UpperCAmelCase ):
_UpperCAmelCase = renew_attention_paths(_UpperCAmelCase )
_UpperCAmelCase = {
'old': F"output_blocks.{i}.1",
'new': F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_UpperCAmelCase = {
F"output_blocks.{i}.1.qkv.bias": {
'key': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
'key': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=_UpperCAmelCase , )
else:
_UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_UpperCAmelCase = '.'.join(['output_blocks', str(_UpperCAmelCase ), path['old']] )
_UpperCAmelCase = '.'.join(['up_blocks', str(_UpperCAmelCase ), 'resnets', str(_UpperCAmelCase ), path['new']] )
_UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCAmelCase__ = json.loads(f.read())
UpperCAmelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCAmelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCAmelCase__ = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCAmelCase__ = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 639
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
            '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
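    # Each line of the ref file is expected to be a JSON list of sub-token indices used
    # for Chinese whole-word masking; it must align one-to-one with the dataset rows.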
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
        _UpperCAmelCase = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to stop the Trainer from removing these columns
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 639
| 1
|
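The checkpoint-detection block near the top of the script above is easy to lose in the obfuscated names. Below is a minimal readable sketch of the same logic, assuming the standard `get_last_checkpoint` helper from `transformers.trainer_utils`; the function and argument names here are illustrative, not the script's own.

import os
from transformers.trainer_utils import get_last_checkpoint

def detect_resume_point(output_dir: str, do_train: bool, overwrite_output_dir: bool):
    """Return a checkpoint path to resume from, or None (illustrative names)."""
    last_checkpoint = None
    if os.path.isdir(output_dir) and do_train and not overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(output_dir)
        if last_checkpoint is None and len(os.listdir(output_dir)) > 0:
            raise ValueError(
                f"Output directory ({output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
    return last_checkpoint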
from __future__ import annotations
UpperCAmelCase__ = "#"
class __lowerCAmelCase :
def __init__( self : Union[str, Any]) -> None:
"""simple docstring"""
_UpperCAmelCase = {}
def _lowerCamelCase ( self : List[Any] , A : str) -> None:
"""simple docstring"""
_UpperCAmelCase = self._trie
for char in text:
if char not in trie:
_UpperCAmelCase = {}
_UpperCAmelCase = trie[char]
_UpperCAmelCase = True
def _lowerCamelCase ( self : Tuple , A : str) -> tuple | list:
"""simple docstring"""
_UpperCAmelCase = self._trie
for char in prefix:
if char in trie:
_UpperCAmelCase = trie[char]
else:
return []
return self._elements(A)
def _lowerCamelCase ( self : List[str] , A : dict) -> tuple:
"""simple docstring"""
_UpperCAmelCase = []
        for c, v in A.items():
_UpperCAmelCase = [' '] if c == END else [(c + s) for s in self._elements(A)]
result.extend(A)
return tuple(A)
UpperCAmelCase__ = Trie()
UpperCAmelCase__ = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def A ( _UpperCAmelCase : str ) -> tuple:
'''simple docstring'''
_UpperCAmelCase = trie.find_word(_UpperCAmelCase )
return tuple(string + word for word in suffixes )
def A ( ) -> None:
'''simple docstring'''
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 639
|
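For reference, here is a readable sketch of the trie above with the obfuscated names restored. It uses an empty-string sentinel for completed words instead of the original's single space, so the returned suffixes carry no trailing whitespace; everything else mirrors the snippet.

END = "#"

class Trie:
    """Prefix tree supporting word insertion and prefix completion (readable sketch)."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        node = self._trie
        for char in text:
            node = node.setdefault(char, {})
        node[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple:
        node = self._trie
        for char in prefix:
            if char not in node:
                return ()
            node = node[char]
        return self._elements(node)

    def _elements(self, node: dict) -> tuple:
        result = []
        for char, child in node.items():
            # an END marker contributes the empty suffix; otherwise recurse
            sub = [""] if char == END else [char + s for s in self._elements(child)]
            result.extend(sub)
        return tuple(result)

trie = Trie()
for word in ("depart", "detergent", "daring", "dog", "deer", "deal"):
    trie.insert_word(word)
assert set("de" + s for s in trie.find_word("de")) == {"depart", "detergent", "deer", "deal"}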
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
| 639
| 1
|
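The two pair-building methods above encode XLNet's convention of placing the special tokens at the end of the sequence rather than at the front. A self-contained sketch of that layout follows; the hard-coded sep/cls ids are placeholders, not the real vocabulary ids.

def build_xlnet_inputs(token_ids_a, token_ids_b=None, sep_id=4, cls_id=3):
    """Single sequence: A <sep> <cls>; pair: A <sep> B <sep> <cls> (placeholder ids)."""
    sep, cls = [sep_id], [cls_id]
    if token_ids_b is None:
        return token_ids_a + sep + cls
    return token_ids_a + sep + token_ids_b + sep + cls

def xlnet_token_type_ids(token_ids_a, token_ids_b=None):
    """Segment ids mirror the layout above; the trailing <cls> gets segment id 2."""
    if token_ids_b is None:
        return [0] * (len(token_ids_a) + 1) + [2]
    return [0] * (len(token_ids_a) + 1) + [1] * (len(token_ids_b) + 1) + [2]

assert build_xlnet_inputs([10, 11], [20]) == [10, 11, 4, 20, 4, 3]
assert xlnet_token_type_ids([10, 11], [20]) == [0, 0, 0, 1, 1, 2]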
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
UpperCAmelCase__ = "</w>"
UpperCAmelCase__ = "@@ "
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase__ = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , A : List[Any] , A : Tuple="<s>" , A : List[Any]="<pad>" , A : Optional[Any]="</s>" , A : Optional[int]="<unk>" , A : Tuple=False , A : int=None , **A : Optional[Any] , ) -> str:
"""simple docstring"""
super().__init__(
unk_token=A , bos_token=A , eos_token=A , pad_token=A , do_lower_case=A , **A , )
_UpperCAmelCase = do_lower_case
with open(A , encoding='utf-8') as vocab_handle:
_UpperCAmelCase = json.load(A)
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_UpperCAmelCase = None
_UpperCAmelCase = None
else:
with open(A , encoding='utf-8') as merges_handle:
_UpperCAmelCase = merges_handle.read().split('\n')[:-1]
_UpperCAmelCase = [tuple(merge.split()[:2]) for merge in merges]
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = {}
@property
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
return len(self.decoder)
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self : str , A : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = get_pairs(A)
if not pairs:
return token
while True:
_UpperCAmelCase = min(A , key=lambda A: self.bpe_ranks.get(A , float('inf')))
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(A):
try:
_UpperCAmelCase = word.index(A , A)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_UpperCAmelCase = j
if word[i] == first and i < len(A) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCAmelCase = tuple(A)
_UpperCAmelCase = new_word
if len(A) == 1:
break
else:
_UpperCAmelCase = get_pairs(A)
_UpperCAmelCase = ' '.join(A)
if word == "\n " + BPE_TOKEN_MERGES:
_UpperCAmelCase = '\n' + BPE_TOKEN_MERGES
if word.endswith(A):
_UpperCAmelCase = word.replace(A , '')
_UpperCAmelCase = word.replace(' ' , A)
_UpperCAmelCase = word
return word
def _lowerCamelCase ( self : Any , A : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.')
if self.do_lower_case:
_UpperCAmelCase = text.lower()
_UpperCAmelCase = text.split()
_UpperCAmelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(A).split(' ')))
return split_tokens
def _lowerCamelCase ( self : Tuple , A : str) -> int:
"""simple docstring"""
return self.encoder.get(A , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self : str , A : int) -> str:
"""simple docstring"""
_UpperCAmelCase = self.decoder.get(A , self.unk_token)
return result
def _lowerCamelCase ( self : Tuple , A : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = ' '.join(A)
# make sure @@ tokens are concatenated
_UpperCAmelCase = ''.join(string.split(A))
return string
def _lowerCamelCase ( self : Any , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(A , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A) + '\n')
_UpperCAmelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(A , 'w' , encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!')
_UpperCAmelCase = token_index
writer.write(' '.join(A) + '\n')
index += 1
return (vocab_file, merges_file)
| 639
|
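The heart of this tokenizer is the greedy BPE loop in the `bpe` method. Here is a simplified, runnable sketch of that merge procedure, without the caching and the `@@ `/`</w>` bookkeeping the class layers on top; the merge loop uses a plain scan rather than the original's `word.index` search, but computes the same result.

def get_pairs(word):
    """Set of adjacent symbol pairs in a word represented as a tuple of symbols."""
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe_merge(token, bpe_ranks):
    """Greedy BPE: repeatedly merge the adjacent pair with the lowest merge rank."""
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break  # no remaining pair is a known merge
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = get_pairs(word)
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert bpe_merge("lower", ranks) == "low e r"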
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
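The `_import_structure` / `_LazyModule` dance above defers heavy imports (torch, vision) until an attribute is first accessed. A minimal sketch of the same idea using PEP 562's module-level `__getattr__` is shown below; this illustrates the pattern only, and is not transformers' actual implementation.

# Sketch of a package __init__.py: attributes resolve to their submodule on first access.
import importlib

_import_structure = {"configuration_yolos": ["YolosConfig"], "modeling_yolos": ["YolosModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{module_name}", __name__)
    return getattr(submodule, name)  # imported only now, not at package import time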
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __lowerCAmelCase ( A ):
def __init__( self : int , A : int = 1_01) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = length
def __len__( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
return self.length
def __getitem__( self : Tuple , A : Union[str, Any]) -> int:
"""simple docstring"""
        return A
class __lowerCAmelCase :
def __call__( self : Optional[Any] , A : str) -> Union[str, Any]:
"""simple docstring"""
return {"input_ids": torch.tensor(A), "labels": torch.tensor(A)}
class __lowerCAmelCase ( nn.Module ):
def __init__( self : int) -> Tuple:
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_UpperCAmelCase = nn.Linear(1_20 , 80)
def _lowerCamelCase ( self : int , A : str , A : Tuple=None) -> List[str]:
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class __lowerCAmelCase ( A ):
@require_torch_neuroncore
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"--output_dir {output_dir}".split()
_UpperCAmelCase = ['torchrun'] + distributed_args + args
execute_subprocess_async(A , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class __lowerCAmelCase ( A ):
@require_torch_multi_gpu
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"--output_dir {output_dir}".split()
_UpperCAmelCase = ['torchrun'] + distributed_args + args
execute_subprocess_async(A , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
UpperCAmelCase__ = HfArgumentParser((TrainingArguments,))
UpperCAmelCase__ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
UpperCAmelCase__ = DummyDataset(dataset_length)
def A ( _UpperCAmelCase : EvalPrediction ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = list(range(len(_UpperCAmelCase ) ) )
_UpperCAmelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
UpperCAmelCase__ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
UpperCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase__ = 2
UpperCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase__ = None
| 639
|
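The `compute_metrics` closure in the script above simply checks that distributed evaluation hands back every sample exactly once and in dataset order. A readable sketch of that check, with illustrative names:

def make_order_check(dataset_length: int):
    expected = list(range(dataset_length))

    def compute_metrics(pred):
        # distributed eval must return all samples, in the original order
        success = pred.predictions.tolist() == expected and pred.label_ids.tolist() == expected
        return {"success": success}

    return compute_metrics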
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 1
|
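The exact-deduplication stage above reduces to: hash each sample with whitespace stripped, then keep only the first occurrence of every hash. A standalone sketch, written against the real `hashlib.md5`:

import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(example):
    """Whitespace-insensitive MD5 of a sample, as in the hashing map above."""
    normalized = re.sub(WHITESPACE, "", example["content"])
    return hashlib.md5(normalized.encode("utf-8")).hexdigest()

def exact_dedup(examples):
    """Keep the first occurrence of each hash (sketch of the uniqueness filter)."""
    seen, kept = set(), []
    for ex in examples:
        h = content_hash(ex)
        if h not in seen:
            seen.add(h)
            kept.append(ex)
    return kept

rows = [{"content": "print(1)"}, {"content": "print( 1 )"}, {"content": "print(2)"}]
assert len(exact_dedup(rows)) == 2  # whitespace-only variants collapse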
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
_UpperCAmelCase = get_activation('swish')
self.assertIsInstance(A , nn.SiLU)
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
def _lowerCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = get_activation('silu')
self.assertIsInstance(A , nn.SiLU)
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = get_activation('mish')
self.assertIsInstance(A , nn.Mish)
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase = get_activation('gelu')
self.assertIsInstance(A , nn.GELU)
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
| 639
|
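All the activations tested above share the same boundary behaviour at large negative inputs, zero, and large positive inputs. For SiLU (also sold as "swish") that follows directly from its definition, sketched here:

import torch

def silu(x: torch.Tensor) -> torch.Tensor:
    """SiLU / swish: x * sigmoid(x); ~0 for large negative x, ~x for large positive x."""
    return x * torch.sigmoid(x)

x = torch.tensor([-100.0, -1.0, 0.0, 20.0])
print(silu(x))  # ~[0.0000, -0.2689, 0.0000, 20.0000]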
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 1
|
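Each branch of `fix_jukebox_keys` follows the same recipe: match an original checkpoint key with a regex, recompute a block index from the captured groups, and emit the renamed key. A self-contained sketch of one such branch follows; the regex here is tightened with escaped dots and `\d+`, where the original uses looser `(\d*)` patterns.

import re

re_encoder_conv_in = re.compile(
    r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d+)\.(bias|weight)"
)

def rename_encoder_key(key: str) -> str:
    match = re_encoder_conv_in.fullmatch(key)
    if match is None:
        return key  # keep keys we do not recognize
    enc, level, outer, inner, kind = match.groups()
    block_index = int(outer) * 2 + int(inner)  # flatten the nested model indices
    return f"encoders.{enc}.level_blocks.{level}.downsample_block.{block_index}.{kind}"

assert rename_encoder_key("encoders.0.level_blocks.1.model.2.1.weight") \
    == "encoders.0.level_blocks.1.downsample_block.5.weight"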
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
def __init__( self : Any , A : Any , A : Any=3 , A : Optional[Any]=32 , A : str=3 , A : Optional[Any]=10 , A : str=[10, 20, 30, 40] , A : int=[1, 1, 2, 1] , A : Optional[Any]=True , A : Tuple=True , A : Any="relu" , A : Union[str, Any]=3 , A : List[str]=None , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(A)
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCamelCase ( self : Optional[int] , A : int , A : Tuple , A : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = RegNetModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self : Optional[Any] , A : Union[str, Any] , A : Any , A : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = RegNetForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = RegNetModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , has_text_modality=A)
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds')
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def _lowerCamelCase ( self : int) -> str:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=A)
for name, module in model.named_modules():
if isinstance(A , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
def check_hidden_states_output(A : Optional[Any] , A : int , A : Union[str, Any]):
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(A) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = RegNetModel.from_pretrained(A)
self.assertIsNotNone(A)
def A ( ) -> Any:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(A)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4))
| 639
|
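Outside a test harness, the same end-to-end check looks roughly like the sketch below. The checkpoint name is an assumption (the first entry of the pretrained archive list); any RegNet classification checkpoint would do.

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

checkpoint = "facebook/regnet-y-040"  # assumed checkpoint; any RegNet classifier works
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = RegNetForImageClassification.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) for the ImageNet head
print(model.config.id2label[logits.argmax(-1).item()])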
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 1
|
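Stripped of the test scaffolding, using the pipeline comes down to a few lines; the model name and image path below are taken directly from the slow test above.

from PIL import Image
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
for pred in classifier(image, candidate_labels=["cat", "plane", "remote"]):
    print(f"{pred['label']}: {pred['score']:.3f}")  # e.g. remote: 0.511, cat: 0.485, ...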
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
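# A minimal wiring sketch (illustrative only; the Trainer arguments shown are
# assumptions, not part of this file): the helpers above are meant to be passed
# straight to a pytorch_lightning Trainer, alongside the logging callback class
# defined above.
#
# checkpoint = get_checkpoint_callback(output_dir='outputs', metric='rouge2')
# early_stop = get_early_stopping_callback(metric='rouge2', patience=3)
# trainer = pl.Trainer(callbacks=[checkpoint, early_stop], max_epochs=10)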
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
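# A minimal sketch of the idea behind `_LazyModule` (illustrative only, not the
# actual transformers implementation): attribute access triggers the import of
# the submodule that provides the requested name, so heavy backends are only
# loaded on first use.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr: str):
        module = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)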
| 639
| 1
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
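# For comparison, an iterative BFS two-coloring performs the same check without
# recursion (an illustrative alternative, not required by the code above):
def check_bipartite_bfs(graph: dict) -> bool:
    from collections import deque
    color = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True
print(check_bipartite_bfs(graph))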
| 639
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
UpperCAmelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 1
|
from collections.abc import Sequence
from queue import Queue
class __lowerCAmelCase :
def __init__( self : List[str] , A : List[str] , A : Optional[Any] , A : int , A : Dict=None , A : Optional[Any]=None) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = val
_UpperCAmelCase = (start + end) // 2
_UpperCAmelCase = left
_UpperCAmelCase = right
def __repr__( self : str) -> Tuple:
"""simple docstring"""
return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class __lowerCAmelCase :
def __init__( self : List[Any] , A : Sequence , A : Any) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = collection
_UpperCAmelCase = function
if self.collection:
_UpperCAmelCase = self._build_tree(0 , len(A) - 1)
def _lowerCamelCase ( self : Dict , A : Union[str, Any] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
self._update_tree(self.root , A , A)
def _lowerCamelCase ( self : Optional[Any] , A : List[str] , A : List[str]) -> List[str]:
"""simple docstring"""
return self._query_range(self.root , A , A)
def _lowerCamelCase ( self : Any , A : Optional[Any] , A : List[Any]) -> List[str]:
"""simple docstring"""
if start == end:
return SegmentTreeNode(A , A , self.collection[start])
_UpperCAmelCase = (start + end) // 2
_UpperCAmelCase = self._build_tree(A , A)
_UpperCAmelCase = self._build_tree(mid + 1 , A)
return SegmentTreeNode(A , A , self.fn(left.val , right.val) , A , A)
def _lowerCamelCase ( self : Dict , A : Any , A : Optional[int] , A : Union[str, Any]) -> Dict:
"""simple docstring"""
if node.start == i and node.end == i:
_UpperCAmelCase = val
return
if i <= node.mid:
self._update_tree(node.left , A , A)
else:
self._update_tree(node.right , A , A)
_UpperCAmelCase = self.fn(node.left.val , node.right.val)
def _lowerCamelCase ( self : List[str] , A : Any , A : Tuple , A : Union[str, Any]) -> Tuple:
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , A , A)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , A , node.mid) , self._query_range(node.right , node.mid + 1 , A) , )
else:
# range in right child tree
return self._query_range(node.right , A , A)
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
if self.root is not None:
_UpperCAmelCase = Queue()
queue.put(self.root)
while not queue.empty():
_UpperCAmelCase = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
UpperCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 639
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 1
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
UpperCAmelCase__ = get_logger(__name__)
UpperCAmelCase__ = Path(__file__).parent / "model_card_template.md"
UpperCAmelCase__ = uuida().hex
UpperCAmelCase__ = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
UpperCAmelCase__ = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
UpperCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    '''simple docstring'''
    ua = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F"; torch/{_torch_version}"
    if is_flax_available():
        ua += F"; jax/{_jax_version}"
        ua += F"; flax/{_flax_version}"
    if is_onnx_available():
        ua += F"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str , token: Optional[str] = None , organization: Optional[str] = None ) -> str:
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['name']
        return F"{username}/{model_id}"
    else:
        return F"{organization}/{model_id}"
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(_UpperCAmelCase , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
_UpperCAmelCase = args.hub_token if hasattr(_UpperCAmelCase , 'hub_token' ) else None
_UpperCAmelCase = get_full_repo_name(_UpperCAmelCase , token=_UpperCAmelCase )
_UpperCAmelCase = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_UpperCAmelCase , model_name=_UpperCAmelCase , repo_name=_UpperCAmelCase , dataset_name=args.dataset_name if hasattr(_UpperCAmelCase , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_UpperCAmelCase , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(_UpperCAmelCase , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(_UpperCAmelCase , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_UpperCAmelCase , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(_UpperCAmelCase , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(_UpperCAmelCase , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_UpperCAmelCase , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_UpperCAmelCase , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(_UpperCAmelCase , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(_UpperCAmelCase , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
_UpperCAmelCase = os.path.join(args.output_dir , 'README.md' )
model_card.save(_UpperCAmelCase )
def extract_commit_hash(resolved_file: Optional[str] , commit_hash: Optional[str] = None ) -> Optional[str]:
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R'snapshots/([^/]+)/' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
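# Worked example: for a resolved cache path such as
# "~/.cache/huggingface/diffusers/models--x/snapshots/0123abcd.../config.json",
# the regex captures "0123abcd..." and it is returned only if it matches
# REGEX_COMMIT_HASH (i.e. looks like a full commit hash); otherwise None.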
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
UpperCAmelCase__ = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
UpperCAmelCase__ = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None ) -> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
UpperCAmelCase__ = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
UpperCAmelCase__ = 0
else:
with open(cache_version_file) as f:
try:
UpperCAmelCase__ = int(f.read())
except ValueError:
UpperCAmelCase__ = 0
if cache_version < 1:
UpperCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
UpperCAmelCase__ = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"the directory exists and can be written to."
)
def _add_variant(weights_name: str , variant: Optional[str] = None ) -> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('.' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits )
    return weights_name
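# e.g. _add_variant('diffusion_pytorch_model.bin', 'fp16')
#      -> 'diffusion_pytorch_model.fp16.bin'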
def A ( _UpperCAmelCase : Union[str, Any] , *,
_UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any=None , ) -> Any:
'''simple docstring'''
_UpperCAmelCase = str(_UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(_UpperCAmelCase ):
if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ):
# Load from a PyTorch checkpoint
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ):
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return model_file
else:
raise EnvironmentError(
F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_UpperCAmelCase ).base_version ) >= version.parse('0.20.0' )
):
try:
_UpperCAmelCase = hf_hub_download(
_UpperCAmelCase , filename=_add_variant(_UpperCAmelCase , _UpperCAmelCase ) , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , user_agent=_UpperCAmelCase , subfolder=_UpperCAmelCase , revision=revision or commit_hash , )
warnings.warn(
F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , _UpperCAmelCase , )
return model_file
except: # noqa: E722
warnings.warn(
F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_UpperCAmelCase , _UpperCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(_UpperCAmelCase , _UpperCAmelCase )}' so that the correct variant file can be added." , _UpperCAmelCase , )
try:
# 2. Load model file as usual
_UpperCAmelCase = hf_hub_download(
_UpperCAmelCase , filename=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , user_agent=_UpperCAmelCase , subfolder=_UpperCAmelCase , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'this model name. Check the model page at '
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 639
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 639
| 1
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def benchmark_map(dataset: datasets.Dataset , **kwargs ):
    '''simple docstring'''
    _ = dataset.map(**kwargs )
@get_duration
def benchmark_filter(dataset: datasets.Dataset , **kwargs ):
    '''simple docstring'''
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    '''simple docstring'''
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['text'] )
        times['map identity'] = benchmark_map(dataset )
        times['map identity batched'] = benchmark_map(dataset , batched=True )
        times['map no-op batched'] = benchmark_map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='numpy' ):
            times['map no-op batched numpy'] = benchmark_map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='pandas' ):
            times['map no-op batched pandas'] = benchmark_map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='torch' , columns='numbers' ):
            times['map no-op batched pytorch'] = benchmark_map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
            times['map no-op batched tensorflow'] = benchmark_map(dataset , function=lambda x : None , batched=True )
        times['map fast-tokenizer batched'] = benchmark_map(dataset , function=tokenize , batched=True )
        times['filter'] = benchmark_filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times["map fast-tokenizer batched numpy"] = benchmark_map(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH , 'wb' ) as f:
            f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
    benchmark_map_filter()
| 639
|
def gnome_sort(lst: list ) -> list:
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
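# Quick sanity checks (illustrative): gnome sort is in-place and O(n^2), and
# handles sorted, reversed and duplicate-heavy inputs alike.
# assert gnome_sort([3, 2, 1]) == [1, 2, 3]
# assert gnome_sort([1, 2, 3]) == [1, 2, 3]
# assert gnome_sort([2, 2, 1, 1]) == [1, 1, 2, 2]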
| 639
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization(TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'summary': Value('string' )} )
    text_column: str = "text"
    summary_column: str = "summary"
    @property
    def column_mapping(self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text", self.summary_column: "summary"}
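# Illustrative usage sketch (the dataset column names are assumptions): the
# column mapping renames arbitrary source columns to the canonical
# ("text", "summary") pair expected by summarization pipelines.
#
# task = Summarization(text_column='article', summary_column='highlights')
# assert task.column_mapping == {'article': 'text', 'highlights': 'summary'}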
| 639
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
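# Worked example: with pad_token_id=0 and
#   input_ids = [[5, 6, 0],
#                [7, 0, 0]]
# keep_column_mask is [True, True, False] (column 2 is padding everywhere),
# so trim_batch returns [[5, 6], [7, 0]].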
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def save_git_info(folder_path: str ) -> None:
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ) -> None:
    '''simple docstring'''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info() -> dict:
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f: Callable , x: Iterable ) -> List:
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save(obj , path ):
    '''simple docstring'''
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s: str ) -> str:
    '''simple docstring'''
    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction: str , ground_truth: str ) -> float:
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
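# Worked example: f1_score('the cat sat', 'a cat sat down') normalizes both
# sides ('cat sat' vs 'cat sat down'), finds 2 overlapping tokens, so
# precision = 2/2, recall = 2/3 and F1 = 2*1.0*(2/3)/(1.0+2/3) = 0.8.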
def exact_match_score(prediction: str , ground_truth: str ) -> bool:
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns: List[str] , reference_lns: List[str] ) -> Dict:
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix: str ) -> bool:
    '''simple docstring'''
    return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 639
| 1
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __lowerCAmelCase :
UpperCamelCase = None
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = None
UpperCamelCase = 1
UpperCamelCase = None
UpperCamelCase = False
UpperCamelCase = None
UpperCamelCase = None
def _lowerCamelCase ( self : Optional[int]) -> "DownloadConfig":
"""simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 639
|
def add(first: int , second: int ) -> int:
    '''simple docstring'''
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
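# Worked trace for add(5, 3):
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b1000, second=0b000 -> loop exits, returns 8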
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"""{add(first, second) = }""")
| 639
| 1
|
import pprint
import requests
UpperCAmelCase__ = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
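# Illustrative (the response keys are assumptions about the zenquotes API):
# each endpoint returns a list of quote objects, so text and author could be
# printed as:
# for quote in random_quotes():
#     print(f"{quote['q']} - {quote['a']}")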
| 639
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    '''simple docstring'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 1
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(A):
_UpperCAmelCase = AutoConfig.from_pretrained(A)
self.assertIsNotNone(A)
self.assertIsInstance(A , A)
_UpperCAmelCase = FlaxAutoModel.from_pretrained(A)
self.assertIsNotNone(A)
self.assertIsInstance(A , A)
@slow
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(A):
_UpperCAmelCase = AutoConfig.from_pretrained(A)
self.assertIsNotNone(A)
self.assertIsInstance(A , A)
_UpperCAmelCase = FlaxAutoModel.from_pretrained(A)
self.assertIsNotNone(A)
self.assertIsInstance(A , A)
@slow
def _lowerCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(A)
_UpperCAmelCase = FlaxBertModel.from_pretrained(A)
_UpperCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
@jax.jit
def eval(**A : List[Any]):
return model(**A)
eval(**A).block_until_ready()
@slow
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(A)
_UpperCAmelCase = FlaxRobertaModel.from_pretrained(A)
_UpperCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
@jax.jit
def eval(**A : List[Any]):
return model(**A)
eval(**A).block_until_ready()
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
A , 'bert-base is not a local folder and is not a valid model identifier'):
_UpperCAmelCase = FlaxAutoModel.from_pretrained('bert-base')
def _lowerCamelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
_UpperCAmelCase = FlaxAutoModel.from_pretrained(A , revision='aaaaaa')
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
A , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_UpperCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(A , 'Use `from_pt=True` to load this model'):
_UpperCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
| 639
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """Apply the configured resize / crop / rescale / normalize pipeline."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None):
        """Turn model logits into per-image semantic segmentation maps."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
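
# Usage sketch (added for illustration): drive the processor end to end on a
# random image. The 300x300 dummy input below is an assumption, not a value
# taken from the original file or any checkpoint.
if __name__ == "__main__":
    dummy_image = np.random.randint(0, 2_56, (3_00, 3_00, 3), dtype=np.uint8)
    processor = __lowerCAmelCase()
    batch = processor(images=dummy_image, return_tensors='np')
    # shortest edge resized to 256, then a 224x224 center crop, channels first
    print(batch['pixel_values'].shape)  # (1, 3, 224, 224)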
| 639
| 1
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    '''Convert a TensorFlow T5 checkpoint into a PyTorch model directory.'''
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
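
# Example invocation (added for illustration; the script name and paths below
# are placeholders, not files from the original repository):
#
#   python convert_t5_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output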
| 639
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
    def test_base_case(self ) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)

    def test_easy_case(self ) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c) , 5)

    def test_knapsack(self ) -> None:
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c) , 2_20)
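
# For reference (added): a minimal sketch of the recursive 0/1-knapsack these
# tests exercise, assuming the upstream signature
# knapsack(capacity, weights, values, counter). Not the imported implementation.
def _reference_knapsack(capacity , weights , values , counter ):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        # the item does not fit: skip it
        return _reference_knapsack(capacity , weights , values , counter - 1)
    # best of taking the item or leaving it
    return max(
        values[counter - 1]
        + _reference_knapsack(capacity - weights[counter - 1] , weights , values , counter - 1) ,
        _reference_knapsack(capacity , weights , values , counter - 1) ,
    )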
if __name__ == "__main__":
unittest.main()
| 639
| 1
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"""{line["duration"]:.4f}"""
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count the number of failures per test file
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"""\n...\n```\n{err}"""
    print(f"""### {message}""")
else:
    message = "No failed tests! 🤗"
    print(f"""## {message}""")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
UpperCAmelCase__ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
UpperCAmelCase__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
UpperCAmelCase__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase__ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase__ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
UpperCAmelCase__ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase__ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase__ = row[0]
else:
UpperCAmelCase__ = ""
UpperCAmelCase__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 639
|
import qiskit
def single_qubit_measure ( qubits : int , classical_bits : int ) -> qiskit.result.counts.Counts:
    '''Flip two qubits with X gates and measure them on the Aer simulator.'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 1
|
def bubble_sort ( list_data : list , length : int = 0 ) -> list:
    '''Recursive bubble sort.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    '''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1] , list_data[i]
            swapped = True
    # the largest remaining item has bubbled to the end; recurse on the prefix
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
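    # Quick demonstration (added); mirrors the doctest above.
    print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]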
| 639
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    '''Convert a TensorFlow T5 checkpoint into a PyTorch model directory.'''
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 639
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class PixaStructTextConfig ( PretrainedConfig ):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self , vocab_size=5_02_44 , hidden_size=7_68 , d_kv=64 , d_ff=20_48 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig ( PretrainedConfig ):
    model_type = '''pix2struct_vision_model'''

    def __init__( self , hidden_size=7_68 , patch_embed_hidden_size=7_68 , d_ff=20_48 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=40_96 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig ( PretrainedConfig ):
    model_type = '''pix2struct'''
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.0_2 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ) -> None:
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls , text_config : PixaStructTextConfig , vision_config : PixaStructVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs)

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
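
# Usage sketch (added for illustration): composing the two sub-configs. The
# hidden sizes below are illustrative assumptions, not values from a released
# checkpoint.
if __name__ == "__main__":
    text_config = PixaStructTextConfig(hidden_size=7_68)
    vision_config = PixaStructVisionConfig(hidden_size=7_68)
    config = PixaStructConfig.from_text_vision_configs(text_config , vision_config)
    print(config.to_dict()['text_config']['hidden_size'])  # 768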
| 639
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components(self ):
        return self._get_dummy_components()

    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed )).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed )).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
    def test_save_load_floataa(self ):
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 639
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput ( BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding ( nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self ) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv_a = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv_a)
            conv_b = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv_b)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , conditioning ):
        # progressively downsample the conditioning image into the latent space
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 12_80
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self , rng : jax.random.KeyArray ) -> FrozenDict:
        # init dummy input tensors, then run a forward pass to create the params
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32)
        timesteps = jnp.ones((1,) , dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond)["params"]
    def setup(self ) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads , int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale : float = 1.0 , return_dict : bool = True , train : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1)
        # 1. time
        if not isinstance(timesteps , jnp.ndarray):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32)
        elif isinstance(timesteps , jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps , 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train)
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample)
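
# Initialisation sketch (added for illustration): creating the model and its
# parameters with the class defaults. The PRNG seed is an arbitrary assumption.
if __name__ == "__main__":
    controlnet = FlaxControlNetModel(sample_size=32)
    params = controlnet.init_weights(jax.random.PRNGKey(0))
    print(list(params.keys()))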
| 639
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    '''Count the triangle words in words.txt (each letter scored A=1, ..., Z=26).'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    wordlist_path = os.path.join(script_dir , 'words.txt' )
    words = ''
    with open(wordlist_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
| 639
| 1
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path ( pred_path , tgt_path , save_path=None , **rouge_kwargs ):
    '''Score a file of predictions against a file of references with ROUGE.'''
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **rouge_kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
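
# Example invocation (added for illustration; the script and file names are
# placeholders, not paths from the original repository):
#
#   python rouge_cli.py pred_summaries.txt target_summaries.txt --save_path rouge.json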
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 639
|
def binary_xor ( a : int , b : int ) -> str:
    '''Bitwise XOR of two non-negative integers, returned as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    '''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig ( PretrainedConfig ):
    model_type = '''funnel'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__( self , vocab_size=3_05_22 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=7_68 , n_head=12 , d_head=64 , d_inner=30_72 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1E-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self ):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self , value ):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')

    @property
    def num_blocks(self ):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self , value ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
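
# Usage sketch (added for illustration): `block_sizes` drives the derived
# properties; the sizes below are illustrative assumptions.
if __name__ == "__main__":
    config = FunnelConfig(block_sizes=[4, 4, 4])
    print(config.num_blocks , config.num_hidden_layers)  # 3 12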
| 639
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint ( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> None:
    '''Convert an mLUKE checkpoint from the original repository into the Transformers format.'''
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_a = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'r' ) as f:
_UpperCAmelCase = json.load(_UpperCAmelCase )
_UpperCAmelCase = 'MLukeTokenizer'
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_UpperCAmelCase )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['@'] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['#'] )[0]
_UpperCAmelCase = state_dict['embeddings.word_embeddings.weight']
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = F"encoder.layer.{layer_index}.attention.self."
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict['entity_embeddings.entity_embeddings.weight']
_UpperCAmelCase = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict['entity_predictions.bias']
_UpperCAmelCase = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_UpperCAmelCase ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if set(_UpperCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(_UpperCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_UpperCAmelCase , task='entity_classification' )
_UpperCAmelCase = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_UpperCAmelCase , entity_spans=[span] , return_tensors='pt' )
_UpperCAmelCase = model(**_UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = 'Tokyo is the capital of <mask>.'
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_UpperCAmelCase , entity_spans=[span] , return_tensors='pt' )
_UpperCAmelCase = model(**_UpperCAmelCase )
_UpperCAmelCase = encoding['input_ids'][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_UpperCAmelCase )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(_UpperCAmelCase ) )
model.save_pretrained(_UpperCAmelCase )
def load_original_entity_vocab ( entity_vocab_path ) -> dict:
    '''Read the original JSON-lines entity vocab into a flat name -> id mapping.'''
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F"{language}:{entity_name}"] = entity_id
    return new_mapping
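
# Data sketch (added for illustration): one line of the JSON-lines format the
# loader above expects, inferred from the parsing logic rather than taken from
# a real vocab file:
#
#   {"id": 0, "entities": [["[MASK]", "en"], ["[MASK]", "ja"]]}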
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 639
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )

    def __post_init__(self ):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    mlm_probability: float = field(
        default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )

    def __post_init__(self ):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references ( dataset , ref_file ):
    '''Attach whole-word-masking reference ids from `ref_file` to `dataset`.'''
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
            datasets["train"] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.config_overrides is not None:
        logger.info(F"Overriding config: {model_args.config_overrides}" )
        config.update_from_string(model_args.config_overrides )
        logger.info(F"New config: {config}" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, we need to keep the Trainer from removing these columns
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
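# The evaluation branch above reports masked-LM quality as perplexity, i.e. the exponential of
# the evaluation loss. A minimal sketch of that relationship (the loss value is illustrative):
import math
eval_loss = 2.0  # e.g. the 'eval_loss' returned by trainer.evaluate()
perplexity = math.exp(eval_loss)  # ~7.39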
| 639
| 1
|
from __future__ import annotations
UpperCAmelCase__ = tuple[int, int, int]
UpperCAmelCase__ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCAmelCase__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCAmelCase__ = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCAmelCase__ = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCAmelCase__ = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
UpperCAmelCase__ = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCAmelCase__ = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCAmelCase__ = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCAmelCase__ = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCAmelCase__ = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCAmelCase__ = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCAmelCase__ = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def A ( _UpperCAmelCase : RotorPositionT , _UpperCAmelCase : RotorSelectionT , _UpperCAmelCase : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
'''simple docstring'''
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_UpperCAmelCase ) )) < 3:
_UpperCAmelCase = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(_UpperCAmelCase )
# Checks if rotor positions are valid
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rotpos
if not 0 < rotorposa <= len(_UpperCAmelCase ):
_UpperCAmelCase = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
_UpperCAmelCase = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
_UpperCAmelCase = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_UpperCAmelCase )
# Validates string and returns dict
_UpperCAmelCase = _plugboard(_UpperCAmelCase )
return rotpos, rotsel, pbdict
def A ( _UpperCAmelCase : str ) -> dict[str, str]:
'''simple docstring'''
# tests whether the input string
# a) is of type str
# b) has even length (so pairs can be made)
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = F"Plugboard setting isn't type string ({type(_UpperCAmelCase )})"
raise TypeError(_UpperCAmelCase )
elif len(_UpperCAmelCase ) % 2 != 0:
_UpperCAmelCase = F"Odd number of symbols ({len(_UpperCAmelCase )})"
raise Exception(_UpperCAmelCase )
elif pbstring == "":
return {}
_UpperCAmelCase = pbstring.replace(' ' , '' )  # str.replace returns a new string, so the result must be assigned
# Checks if all characters are unique
_UpperCAmelCase = set()
for i in pbstring:
if i not in abc:
_UpperCAmelCase = F"'{i}' not in list of symbols"
raise Exception(_UpperCAmelCase )
elif i in tmppbl:
_UpperCAmelCase = F"Duplicate symbol ({i})"
raise Exception(_UpperCAmelCase )
else:
tmppbl.add(_UpperCAmelCase )
del tmppbl
# Create the dictionary
_UpperCAmelCase = {}
for j in range(0 , len(_UpperCAmelCase ) - 1 , 2 ):
_UpperCAmelCase = pbstring[j + 1]
_UpperCAmelCase = pbstring[j]
return pb
def A ( _UpperCAmelCase : str , _UpperCAmelCase : RotorPositionT , _UpperCAmelCase : RotorSelectionT = (rotora, rotora, rotora) , _UpperCAmelCase : str = "" , ) -> str:
'''simple docstring'''
_UpperCAmelCase = text.upper()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _validator(
_UpperCAmelCase , _UpperCAmelCase , plugb.upper() )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rotor_position
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_UpperCAmelCase = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_UpperCAmelCase = plugboard[symbol]
# rotor ra --------------------------
_UpperCAmelCase = abc.index(_UpperCAmelCase ) + rotorposa
_UpperCAmelCase = rotora[index % len(_UpperCAmelCase )]
# rotor rb --------------------------
_UpperCAmelCase = abc.index(_UpperCAmelCase ) + rotorposa
_UpperCAmelCase = rotora[index % len(_UpperCAmelCase )]
# rotor rc --------------------------
_UpperCAmelCase = abc.index(_UpperCAmelCase ) + rotorposa
_UpperCAmelCase = rotora[index % len(_UpperCAmelCase )]
# reflector --------------------------
# the reflector is the reason the same machine both enciphers and deciphers
_UpperCAmelCase = reflector[symbol]
# 2nd rotors
_UpperCAmelCase = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
_UpperCAmelCase = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
_UpperCAmelCase = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_UpperCAmelCase = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
_UpperCAmelCase = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
_UpperCAmelCase = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
_UpperCAmelCase = 0
# else:
# pass
# An error could also be raised:
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = "This is my Python script that emulates the Enigma machine from WWII."
UpperCAmelCase__ = (1, 1, 1)
UpperCAmelCase__ = "pictures"
UpperCAmelCase__ = (rotora, rotora, rotora)
UpperCAmelCase__ = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 639
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
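# The input-building and token-type helpers in the tokenizer above encode XLNet's layout,
# which places the classifier token at the end of the sequence rather than the front.
# A hedged illustration with toy tokens:
a, b = ["how", "are", "you"], ["fine"]
sep, cls = ["<sep>"], ["<cls>"]
pair = a + sep + b + sep + cls  # A <sep> B <sep> <cls>
type_ids = len(a + sep) * [0] + len(b + sep) * [1] + [2]  # segment id 2 marks <cls>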
| 639
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
def __init__( self : Dict , *A : List[Any] , **A : List[Any]) -> None:
"""simple docstring"""
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , A , )
super().__init__(*A , **A)
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
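# The module above (like the similar __init__ stubs further down) defers heavy imports via
# transformers' _LazyModule. A simplified, hedged sketch of the underlying idea using a PEP 562
# module-level __getattr__ -- not the actual _LazyModule implementation:
import importlib
_import_structure_sketch = {"modeling_yolos": ["YolosModel"]}
def __getattr__(name):
    for submodule, names in _import_structure_sketch.items():
        if name in names:
            return getattr(importlib.import_module("." + submodule, __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")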
| 639
| 1
|
import functools
from typing import Any
def A ( _UpperCAmelCase : str , _UpperCAmelCase : list[str] ) -> bool:
'''simple docstring'''
# Validation
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not all(
isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
_UpperCAmelCase = {}
_UpperCAmelCase = 'WORD_KEEPER'
for word in words:
_UpperCAmelCase = trie
for c in word:
if c not in trie_node:
_UpperCAmelCase = {}
_UpperCAmelCase = trie_node[c]
_UpperCAmelCase = True
_UpperCAmelCase = len(_UpperCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(_UpperCAmelCase : int ) -> bool:
if index == len_string:
return True
_UpperCAmelCase = trie
for i in range(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = trie_node.get(string[i] , _UpperCAmelCase )
if trie_node is None:
return False
if trie_node.get(_UpperCAmelCase , _UpperCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
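# A hedged usage example of the word-break routine above (the obfuscation names it `A`;
# the alias below is purely illustrative):
word_break = A
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False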
| 639
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = BigBirdConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
if is_trivia_qa:
_UpperCAmelCase = BigBirdForQuestionAnswering(_UpperCAmelCase )
else:
_UpperCAmelCase = BigBirdForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_UpperCAmelCase , _UpperCAmelCase , is_trivia_qa=_UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
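# One worked example of the regex renaming performed by fix_jukebox_keys above (the key is
# illustrative): re_encoder_block_conv_in maps a vqvae encoder conv weight as follows.
import re
pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
groups = pattern.match("encoders.0.level_blocks.1.model.2.1.weight").groups()
block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 1 = 5
new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
# -> 'encoders.0.level_blocks.1.downsample_block.5.weight'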
| 639
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The scores are so close that floating-point error dominates, so the order is not guaranteed
# across python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 1
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCAmelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
if isinstance(_UpperCAmelCase , torch.Tensor ):
return image
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
_UpperCAmelCase = [image]
_UpperCAmelCase = [trans(img.convert('RGB' ) ) for img in image]
_UpperCAmelCase = torch.stack(_UpperCAmelCase )
return image
class __lowerCAmelCase ( A ):
def __init__( self : List[Any] , A : Optional[Any] , A : int) -> Optional[Any]:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=A , scheduler=A)
def _lowerCamelCase ( self : Tuple , A : Any) -> List[str]:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(F"The value of strength should in [0.0, 1.0] but is {strength}")
def _lowerCamelCase ( self : List[str] , A : Any , A : Tuple , A : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = min(int(num_inference_steps * strength) , A)
_UpperCAmelCase = max(num_inference_steps - init_timestep , 0)
_UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCamelCase ( self : Dict , A : List[str] , A : Any , A : List[Any] , A : List[str] , A : str , A : Any=None) -> int:
"""simple docstring"""
if not isinstance(A , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A)}")
_UpperCAmelCase = image.to(device=A , dtype=A)
if isinstance(A , A) and len(A) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(A)}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators.")
_UpperCAmelCase = init_latents.shape
_UpperCAmelCase = randn_tensor(A , generator=A , device=A , dtype=A)
# get latents
print('add noise to latents at timestep' , A)
_UpperCAmelCase = self.scheduler.add_noise(A , A , A)
_UpperCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , A : Union[torch.FloatTensor, PIL.Image.Image] = None , A : float = 0.8 , A : int = 1 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : float = 0.0 , A : int = 50 , A : Optional[bool] = None , A : Optional[str] = "pil" , A : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(A)
# 2. Preprocess image
_UpperCAmelCase = preprocess(A)
# 3. set timesteps
self.scheduler.set_timesteps(A , device=self.device)
_UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(A , A , self.device)
_UpperCAmelCase = timesteps[:1].repeat(A)
# 4. Prepare latent variables
_UpperCAmelCase = self.prepare_latents(A , A , A , self.unet.dtype , self.device , A)
_UpperCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(A):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(A , A).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(
A , A , A , eta=A , use_clipped_model_output=A , generator=A , ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(A)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=A)
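# A quick numeric check of get_timesteps above: with num_inference_steps = 50 and
# strength = 0.8 (values illustrative), the loop denoises over the last 40 scheduler steps.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)  # 10: the first 10 timesteps are skipped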
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( A ):
UpperCamelCase = 42
class __lowerCAmelCase ( A , A ):
@register_to_config
def __init__( self : List[Any] , A : int = 16 , A : int = 88 , A : Optional[int] = None , A : Optional[int] = None , A : int = 1 , A : float = 0.0 , A : int = 32 , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : str = "geglu" , A : bool = True , A : bool = True , ) -> Any:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = attention_head_dim
_UpperCAmelCase = num_attention_heads * attention_head_dim
_UpperCAmelCase = in_channels
_UpperCAmelCase = torch.nn.GroupNorm(num_groups=A , num_channels=A , eps=1E-6 , affine=A)
_UpperCAmelCase = nn.Linear(A , A)
# 3. Define transformers blocks
_UpperCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
A , A , A , dropout=A , cross_attention_dim=A , activation_fn=A , attention_bias=A , double_self_attention=A , norm_elementwise_affine=A , )
for d in range(A)
])
_UpperCAmelCase = nn.Linear(A , A)
def _lowerCamelCase ( self : List[str] , A : Union[str, Any] , A : Optional[Any]=None , A : Tuple=None , A : Tuple=None , A : List[str]=1 , A : Optional[int]=None , A : bool = True , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_states.shape
_UpperCAmelCase = batch_frames // num_frames
_UpperCAmelCase = hidden_states
_UpperCAmelCase = hidden_states[None, :].reshape(A , A , A , A , A)
_UpperCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4)
_UpperCAmelCase = self.norm(A)
_UpperCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1).reshape(batch_size * height * width , A , A)
_UpperCAmelCase = self.proj_in(A)
# 2. Blocks
for block in self.transformer_blocks:
_UpperCAmelCase = block(
A , encoder_hidden_states=A , timestep=A , cross_attention_kwargs=A , class_labels=A , )
# 3. Output
_UpperCAmelCase = self.proj_out(A)
_UpperCAmelCase = (
hidden_states[None, None, :]
.reshape(A , A , A , A , A)
.permute(0 , 3 , 4 , 1 , 2)
.contiguous()
)
_UpperCAmelCase = hidden_states.reshape(A , A , A , A)
_UpperCAmelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=A)
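# A hedged shape walk-through of the forward pass above (sizes illustrative):
# input hidden_states: (batch * frames, channels, height, width)
# reshape + permute   -> (batch, channels, frames, height, width) for the GroupNorm
# permute + reshape   -> (batch * height * width, frames, channels), so attention runs along frames
# proj_out + reshape  -> back to (batch * frames, channels, height, width), then the residual is added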
| 639
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
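# A tiny sanity check of the trainable-parameter counter above (the obfuscation names it `A`
# here, before the name is reused below): a Linear(10, 2) layer has 10 * 2 weights plus
# 2 biases, i.e. 22 trainable parameters.
assert A(torch.nn.Linear(10, 2)) == 22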
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
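# For instance, metric = "rouge2" above monitors val_rouge2 in "max" mode and keeps a single
# best checkpoint named from the template (the concrete filename below is illustrative):
# "{val_avg_rouge2:.4f}-{step_count}" -> e.g. "0.4312-001500.ckpt"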
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 1
|
UpperCAmelCase__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 639
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 1
|