code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = int(lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple=3_0_0 ):
# docstyle-ignore
return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__lowercase = f"{elt:.6f}" if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __lowercase :
'''simple docstring'''
a : Optional[Any] = 5
a : str = 0.2
def __init__(self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 300 ,) -> List[str]:
'''simple docstring'''
__lowercase = total
__lowercase = '''''' if prefix is None else prefix
__lowercase = leave
__lowercase = parent
__lowercase = width
__lowercase = None
__lowercase = None
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = False ,_lowerCamelCase = None ) -> int:
'''simple docstring'''
__lowercase = value
if comment is not None:
__lowercase = comment
if self.last_value is None:
__lowercase = __lowercase = time.time()
__lowercase = __lowercase = value
__lowercase = __lowercase = None
__lowercase = self.warmup
__lowercase = 1
self.update_bar(_lowerCamelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowercase = time.time()
__lowercase = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
__lowercase = self.elapsed_time / (value - self.start_value)
else:
__lowercase = None
if value >= self.total:
__lowercase = self.total
__lowercase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowercase = self.average_time_per_item * (self.total - value)
self.update_bar(_lowerCamelCase )
__lowercase = value
__lowercase = current_time
if self.average_time_per_item is None:
__lowercase = 1
else:
__lowercase = max(int(self.update_every / self.average_time_per_item ) ,1 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Dict:
'''simple docstring'''
__lowercase = ''' ''' * (len(str(self.total ) ) - len(str(_lowerCamelCase ) )) + str(_lowerCamelCase )
if self.elapsed_time is None:
__lowercase = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
__lowercase = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"
else:
__lowercase = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"
f" {format_time(self.predicted_remaining )}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f", {self.comment}]"
self.display()
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowercase = disp.display(disp.HTML(self.html_code ) ,display_id=_lowerCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Any:
'''simple docstring'''
super().__init__(_lowerCamelCase )
__lowercase = None if column_names is None else [column_names]
__lowercase = None
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowercase = disp.display(disp.HTML(self.html_code ) ,display_id=_lowerCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if self.inner_table is None:
__lowercase = [list(values.keys() ), list(values.values() )]
else:
__lowercase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_lowerCamelCase )
__lowercase = columns
self.inner_table.append([values[c] for c in columns] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ,_lowerCamelCase=300 ) -> int:
'''simple docstring'''
__lowercase = NotebookProgressBar(_lowerCamelCase ,prefix=_lowerCamelCase ,parent=self ,width=_lowerCamelCase )
return self.child_bar
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = None
self.display()
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ) -> List[Any]:
'''simple docstring'''
__lowercase = None
__lowercase = None
__lowercase = False
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__lowercase = 0
__lowercase = 0
__lowercase = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__lowercase = NotebookTrainingTracker(state.max_steps ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 ,comment=f"Epoch {epoch}/{state.num_train_epochs}" ,force_update=self._force_next_update ,)
__lowercase = False
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if not has_length(_lowerCamelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowercase = self.training_tracker.add_child(len(_lowerCamelCase ) )
else:
__lowercase = NotebookProgressBar(len(_lowerCamelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowercase = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step sine we're not in epoch eval strategy
__lowercase = state.global_step
self.training_tracker.write_line(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
if self.training_tracker is not None:
__lowercase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__lowercase = log['''loss''']
break
if self.first_column == "Epoch":
__lowercase = int(state.epoch )
else:
__lowercase = state.global_step
__lowercase = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__lowercase = re.sub(R'''\_loss$''' ,'''''' ,_lowerCamelCase )
__lowercase = metrics.pop('''total_flos''' ,_lowerCamelCase )
__lowercase = metrics.pop('''epoch''' ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_runtime" ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_samples_per_second" ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_steps_per_second" ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" ,_lowerCamelCase )
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
__lowercase = v
else:
__lowercase = k.split('''_''' )
__lowercase = ''' '''.join([part.capitalize() for part in splits[1:]] )
__lowercase = v
self.training_tracker.write_line(_lowerCamelCase )
self.training_tracker.remove_child()
__lowercase = None
# Evaluation takes a long time so we should force the next update.
__lowercase = True
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step ,comment=f"Epoch {int(state.epoch )}/{state.num_train_epochs}" ,force_update=_lowerCamelCase )
__lowercase = None
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_SCREAMING_SNAKE_CASE = '''scheduler_config.json'''
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = 1
a : Dict = 2
a : Optional[Any] = 3
a : List[str] = 4
a : Any = 5
@dataclass
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : jnp.ndarray
class __lowercase :
'''simple docstring'''
a : str = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = ["dtype"]
a : str = []
a : List[Any] = True
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase ,subfolder=_lowerCamelCase ,return_unused_kwargs=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase , __lowercase = cls.from_config(_lowerCamelCase ,return_unused_kwargs=_lowerCamelCase ,**_lowerCamelCase )
if hasattr(_lowerCamelCase ,'''create_state''' ) and getattr(_lowerCamelCase ,'''has_state''' ,_lowerCamelCase ):
__lowercase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = False ,**_lowerCamelCase ) -> str:
'''simple docstring'''
self.save_config(save_directory=_lowerCamelCase ,push_to_hub=_lowerCamelCase ,**_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def _UpperCAmelCase (cls ) -> int:
'''simple docstring'''
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split('''.''' )[0] )
__lowercase = [
getattr(_lowerCamelCase ,_lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase ,_lowerCamelCase )
]
return compatible_classes
def _lowerCAmelCase ( lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : Tuple[int] ):
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any]=0.9_99 , lowerCamelCase_ : Union[str, Any]=jnp.floataa ):
def alpha_bar(lowerCamelCase_ : Any ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
__lowercase = []
for i in range(lowerCamelCase_ ):
__lowercase = i / num_diffusion_timesteps
__lowercase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class __lowercase :
'''simple docstring'''
a : jnp.ndarray
a : jnp.ndarray
a : jnp.ndarray
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = scheduler.config
if config.trained_betas is not None:
__lowercase = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowercase = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowercase = (
jnp.linspace(
config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
__lowercase = 1.0 - betas
__lowercase = jnp.cumprod(_lowerCamelCase ,axis=0 )
return cls(
alphas=_lowerCamelCase ,betas=_lowerCamelCase ,alphas_cumprod=_lowerCamelCase ,)
def _lowerCAmelCase ( lowerCamelCase_ : CommonSchedulerState , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray ):
__lowercase = state.alphas_cumprod
__lowercase = alphas_cumprod[timesteps] ** 0.5
__lowercase = sqrt_alpha_prod.flatten()
__lowercase = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
__lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowercase = sqrt_one_minus_alpha_prod.flatten()
__lowercase = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _lowerCAmelCase ( lowerCamelCase_ : CommonSchedulerState , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray ):
__lowercase , __lowercase = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _lowerCAmelCase ( lowerCamelCase_ : CommonSchedulerState , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray ):
__lowercase , __lowercase = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 710
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 0
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
__lowercase = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def _lowerCAmelCase ( lowerCamelCase_ : Dict ):
__lowercase = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def _lowerCAmelCase ( ):
__lowercase = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] ):
__lowercase = '''imagenet-1k-id2label.json'''
__lowercase = 1_0_0_0
__lowercase = '''huggingface/label-files'''
__lowercase = num_labels
__lowercase = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) ) , '''r''' ) )
__lowercase = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = __lowercase = CvtConfig(num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__lowercase = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__lowercase = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
__lowercase = [2, 2, 2_0]
__lowercase = [3, 1_2, 1_6]
__lowercase = [1_9_2, 7_6_8, 1_0_2_4]
__lowercase = CvtForImageClassification(lowerCamelCase_ )
__lowercase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__lowercase = image_size
__lowercase = torch.load(lowerCamelCase_ , map_location=torch.device('''cpu''' ) )
__lowercase = OrderedDict()
__lowercase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__lowercase = list_of_state_dict + cls_token(lowerCamelCase_ )
__lowercase = list_of_state_dict + embeddings(lowerCamelCase_ )
for cnt in range(config.depth[idx] ):
__lowercase = list_of_state_dict + attention(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
__lowercase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
image_processor.save_pretrained(lowerCamelCase_ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 711
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__lowercase = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__lowercase = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__lowercase = max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 0
|
'''simple docstring'''
from typing import Any
def _lowerCAmelCase ( lowerCamelCase_ : list , lowerCamelCase_ : list , lowerCamelCase_ : dict , lowerCamelCase_ : dict , lowerCamelCase_ : dict , ):
_validation(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# Creates data structures and fill initial step
__lowercase = {}
__lowercase = {}
for state in states_space:
__lowercase = observations_space[0]
__lowercase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__lowercase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCamelCase_ ) ):
__lowercase = observations_space[o]
__lowercase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__lowercase = ''''''
__lowercase = -1
for k_state in states_space:
__lowercase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__lowercase = probability
__lowercase = k_state
# Update probabilities and pointers dicts
__lowercase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__lowercase = arg_max
# The final observation
__lowercase = observations_space[len(lowerCamelCase_ ) - 1]
# argmax for given final observation
__lowercase = ''''''
__lowercase = -1
for k_state in states_space:
__lowercase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__lowercase = probability
__lowercase = k_state
__lowercase = arg_max
# Process pointers backwards
__lowercase = last_state
__lowercase = []
for o in range(len(lowerCamelCase_ ) - 1 , -1 , -1 ):
result.append(lowerCamelCase_ )
__lowercase = pointers[previous, observations_space[o]]
result.reverse()
return result
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , ):
_validate_not_empty(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
_validate_lists(lowerCamelCase_ , lowerCamelCase_ )
_validate_dicts(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
_validate_list(lowerCamelCase_ , '''observations_space''' )
_validate_list(lowerCamelCase_ , '''states_space''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : str ):
if not isinstance(_object , lowerCamelCase_ ):
__lowercase = f"{var_name} must be a list"
raise ValueError(lowerCamelCase_ )
else:
for x in _object:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__lowercase = f"{var_name} must be a list of strings"
raise ValueError(lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Any , ):
_validate_dict(lowerCamelCase_ , '''initial_probabilities''' , lowerCamelCase_ )
_validate_nested_dict(lowerCamelCase_ , '''transition_probabilities''' )
_validate_nested_dict(lowerCamelCase_ , '''emission_probabilities''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : str ):
_validate_dict(_object , lowerCamelCase_ , lowerCamelCase_ )
for x in _object.values():
_validate_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : type , lowerCamelCase_ : bool = False ):
if not isinstance(_object , lowerCamelCase_ ):
__lowercase = f"{var_name} must be a dict"
raise ValueError(lowerCamelCase_ )
if not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for x in _object ):
__lowercase = f"{var_name} all keys must be strings"
raise ValueError(lowerCamelCase_ )
if not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for x in _object.values() ):
__lowercase = '''nested dictionary ''' if nested else ''''''
__lowercase = f"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(lowerCamelCase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 715
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = 0
for ch in input_str:
__lowercase = ord(lowerCamelCase_ )
__lowercase = pow(2 , lowerCamelCase_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCamelCase_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 717
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = '''Normal'''
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = '''Abnormality detected'''
| 56
| 0
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : torch.FloatTensor
a : torch.FloatTensor
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
a : Any = 1
@register_to_config
def __init__(self ,_lowerCamelCase = 2000 ,_lowerCamelCase = 0.1_5 ,_lowerCamelCase = 0.0_1 ,_lowerCamelCase = 1348.0 ,_lowerCamelCase = 1E-5 ,_lowerCamelCase = 1 ,) -> List[str]:
'''simple docstring'''
__lowercase = sigma_max
# setable values
__lowercase = None
self.set_sigmas(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ) -> List[str]:
'''simple docstring'''
__lowercase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase = torch.linspace(1 ,_lowerCamelCase ,_lowerCamelCase ,device=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ) -> List[Any]:
'''simple docstring'''
__lowercase = sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase = sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_lowerCamelCase ,_lowerCamelCase )
__lowercase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase = torch.exp(torch.linspace(math.log(_lowerCamelCase ) ,math.log(_lowerCamelCase ) ,_lowerCamelCase ) )
__lowercase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
return torch.where(
timesteps == 0 ,torch.zeros_like(t.to(timesteps.device ) ) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = True ,) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
__lowercase = timestep * torch.ones(
sample.shape[0] ,device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase = timesteps.to(self.discrete_sigmas.device )
__lowercase = self.discrete_sigmas[timesteps].to(sample.device )
__lowercase = self.get_adjacent_sigma(_lowerCamelCase ,_lowerCamelCase ).to(sample.device )
__lowercase = torch.zeros_like(_lowerCamelCase )
__lowercase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase = diffusion.unsqueeze(-1 )
__lowercase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase = randn_tensor(
sample.shape ,layout=sample.layout ,generator=_lowerCamelCase ,device=sample.device ,dtype=sample.dtype )
__lowercase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_lowerCamelCase ,prev_sample_mean=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = True ,) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase = randn_tensor(sample.shape ,layout=sample.layout ,generator=_lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase = torch.norm(model_output.reshape(model_output.shape[0] ,-1 ) ,dim=-1 ).mean()
__lowercase = torch.norm(noise.reshape(noise.shape[0] ,-1 ) ,dim=-1 ).mean()
__lowercase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase = step_size.unsqueeze(-1 )
__lowercase = sample + step_size * model_output
__lowercase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> torch.FloatTensor:
'''simple docstring'''
__lowercase = timesteps.to(original_samples.device )
__lowercase = self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_lowerCamelCase ) * sigmas[:, None, None, None]
)
__lowercase = noise + original_samples
return noisy_samples
def __len__(self ) -> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 718
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_SCREAMING_SNAKE_CASE = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def _lowerCAmelCase ( lowerCamelCase_ : int ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
__lowercase = []
for num in range(len(lowerCamelCase_ ) ):
__lowercase = 0
while 2 * i * i <= odd_composites[num]:
__lowercase = odd_composites[num] - 2 * i * i
if is_prime(lowerCamelCase_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCamelCase_ ) == n:
return list_nums
return []
def _lowerCAmelCase ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 719
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 56
| 0
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
_SCREAMING_SNAKE_CASE = 2_0_4_8
_SCREAMING_SNAKE_CASE = 4_0_9_6
_SCREAMING_SNAKE_CASE = 4_2
_SCREAMING_SNAKE_CASE = os.environ.pop('''PROCESS_TRAIN''', '''false''')
_SCREAMING_SNAKE_CASE = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
def choose_first(lowerCamelCase_ : Dict , lowerCamelCase_ : str=False ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) == 1:
__lowercase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__lowercase = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
__lowercase = {'''id''': example['''id''']}
__lowercase = example['''annotations''']
__lowercase = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
__lowercase = ['''yes'''] if 1 in yes_no_answer else ['''no''']
__lowercase = __lowercase = []
__lowercase = __lowercase = []
__lowercase = ['''<cls>''']
else:
__lowercase = ['''short''']
__lowercase = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
__lowercase = ['''long''']
__lowercase = choose_first(annotation['''long_answer'''] , is_long_answer=lowerCamelCase_ )
__lowercase = []
answer.update(lowerCamelCase_ )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
__lowercase = True
else:
__lowercase = False
__lowercase = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , lowerCamelCase_ ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int]=False ):
__lowercase = _get_single_answer(lowerCamelCase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__lowercase = example['''document''']['''tokens''']
__lowercase = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__lowercase = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__lowercase = example['''document''']['''tokens''']
__lowercase = answer['''start_token''']
__lowercase = answer['''end_token''']
__lowercase = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__lowercase = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
__lowercase = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
__lowercase = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
__lowercase = ''' '''.join([old[i] for i in range(len(lowerCamelCase_ ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , lowerCamelCase_ , end='''\n''' )
print('''Old:''' , lowerCamelCase_ , end='''\n\n''' )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=2_0_4_8 , lowerCamelCase_ : str=4_0_9_6 , lowerCamelCase_ : Union[str, Any]=True ):
# overlap will be of doc_stride - q_len
__lowercase = get_context_and_ans(lowerCamelCase_ , assertion=lowerCamelCase_ )
__lowercase = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__lowercase = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
__lowercase = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__lowercase = []
__lowercase = []
__lowercase = input_ids[:q_len]
__lowercase = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
for i in doc_start_indices:
__lowercase = i + max_length - q_len
__lowercase = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(lowerCamelCase_ ),
"end_token": [-1_0_0] * len(lowerCamelCase_ ),
"category": category,
},
}
__lowercase = out['''context'''].split()
__lowercase = splitted_context[answer['''end_token''']]
__lowercase = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=lowerCamelCase_ , ).input_ids )
__lowercase = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=lowerCamelCase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__lowercase = len(tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__lowercase = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
__lowercase = answer['''start_token''']
__lowercase = answer['''end_token''']
if assertion:
__lowercase = tokenizer.decode(lowerCamelCase_ )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , lowerCamelCase_ , end='''\n\n''' )
if len(lowerCamelCase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__lowercase = input_ids[:q_len]
__lowercase = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
__lowercase = []
__lowercase = []
__lowercase = []
__lowercase = [] # null, yes, no, long, short
for i in doc_start_indices:
__lowercase = i + max_length - q_len
__lowercase = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__lowercase = start_token - i + q_len
__lowercase = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
__lowercase = -1_0_0
__lowercase = -1_0_0
answers_category.append('''null''' )
__lowercase = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase_ )
answers_end_token.append(lowerCamelCase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(lowerCamelCase_ ) )
print('''Old:''' , tokenizer.decode(lowerCamelCase_ ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[str]=2_0_4_8 , lowerCamelCase_ : Optional[int]=4_0_9_6 , lowerCamelCase_ : Tuple=False ):
__lowercase = get_strided_contexts_and_ans(
lowerCamelCase_ , lowerCamelCase_ , doc_stride=lowerCamelCase_ , max_length=lowerCamelCase_ , assertion=lowerCamelCase_ , )
return example
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict ):
with jsonlines.open(lowerCamelCase_ , '''a''' ) as writer:
for example in tqdm(lowerCamelCase_ , total=len(lowerCamelCase_ ) , desc='''Saving samples ... ''' ):
__lowercase = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_SCREAMING_SNAKE_CASE = load_dataset('''natural_questions''')
_SCREAMING_SNAKE_CASE = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_SCREAMING_SNAKE_CASE = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
_SCREAMING_SNAKE_CASE = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
_SCREAMING_SNAKE_CASE = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_SCREAMING_SNAKE_CASE = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
_SCREAMING_SNAKE_CASE = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
| 720
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 0
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=sys.maxsize ) -> str:
'''simple docstring'''
__lowercase = '''bilinear'''
__lowercase = max_size
__lowercase = short_edge_length
def __call__(self ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = []
for img in imgs:
__lowercase , __lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowercase = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowercase = size * 1.0 / min(_lowerCamelCase ,_lowerCamelCase )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
if max(_lowerCamelCase ,_lowerCamelCase ) > self.max_size:
__lowercase = self.max_size * 1.0 / max(_lowerCamelCase ,_lowerCamelCase )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase = int(neww + 0.5 )
__lowercase = int(newh + 0.5 )
if img.dtype == np.uinta:
__lowercase = Image.fromarray(_lowerCamelCase )
__lowercase = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
__lowercase = np.asarray(_lowerCamelCase )
else:
__lowercase = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
__lowercase = nn.functional.interpolate(
_lowerCamelCase ,(newh, neww) ,mode=self.interp_method ,align_corners=_lowerCamelCase ).squeeze(0 )
img_augs.append(_lowerCamelCase )
return img_augs
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
__lowercase = cfg.INPUT.FORMAT
__lowercase = cfg.SIZE_DIVISIBILITY
__lowercase = cfg.PAD_VALUE
__lowercase = cfg.INPUT.MAX_SIZE_TEST
__lowercase = cfg.MODEL.DEVICE
__lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = lambda _lowerCamelCase : (x - self.pixel_mean) / self.pixel_std
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = tuple(max(_lowerCamelCase ) for s in zip(*[img.shape for img in images] ) )
__lowercase = [im.shape[-2:] for im in images]
__lowercase = [
nn.functional.pad(
_lowerCamelCase ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
for size, im in zip(_lowerCamelCase ,_lowerCamelCase )
]
return torch.stack(_lowerCamelCase ), torch.tensor(_lowerCamelCase )
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [images]
if single_image:
assert len(_lowerCamelCase ) == 1
for i in range(len(_lowerCamelCase ) ):
if isinstance(images[i] ,torch.Tensor ):
images.insert(_lowerCamelCase ,images.pop(_lowerCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] ,torch.Tensor ):
images.insert(
_lowerCamelCase ,torch.as_tensor(img_tensorize(images.pop(_lowerCamelCase ) ,input_format=self.input_format ) )
.to(self.device )
.float() ,)
# resize smallest edge
__lowercase = torch.tensor([im.shape[:2] for im in images] )
__lowercase = self.aug(_lowerCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__lowercase = [self.normalizer(_lowerCamelCase ) for x in images]
# now pad them to do the following operations
__lowercase , __lowercase = self.pad(_lowerCamelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowercase = torch.true_divide(_lowerCamelCase ,_lowerCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple[int, int] ):
assert torch.isfinite(lowerCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
__lowercase , __lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCamelCase_ )
| 721
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 700
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
'''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 56
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = PILImageResampling.BICUBIC ,_lowerCamelCase = True ,_lowerCamelCase = True ,_lowerCamelCase = 1 / 255 ,_lowerCamelCase = None ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = size if size is not None else {'''height''': 224, '''width''': 224}
__lowercase = get_size_dict(_lowerCamelCase )
__lowercase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = do_normalize
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = size
__lowercase = resample
__lowercase = rescale_factor
__lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = PILImageResampling.BILINEAR ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
if "shortest_edge" in size:
__lowercase = get_resize_output_image_size(_lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=_lowerCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__lowercase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' ,default_to_square=_lowerCamelCase )
__lowercase = resample if resample is not None else self.resample
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_lowerCamelCase )
if not is_batched(_lowerCamelCase ):
__lowercase = [images]
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=_lowerCamelCase ,size=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 701
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
| 56
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
_SCREAMING_SNAKE_CASE = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
for attribute in key.split('''.''' ):
__lowercase = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
__lowercase = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
__lowercase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Dict ):
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
__lowercase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(lowerCamelCase_ )[0].split('''.''' )[-2]
__lowercase = mapped_key.replace('''*''' , lowerCamelCase_ )
if "weight_g" in name:
__lowercase = '''weight_g'''
elif "weight_v" in name:
__lowercase = '''weight_v'''
elif "bias" in name:
__lowercase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowercase = '''weight'''
else:
__lowercase = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
__lowercase = full_name.split('''conv_layers.''' )[-1]
__lowercase = name.split('''.''' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowercase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowercase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
__lowercase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
__lowercase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : str=True ):
if config_path is not None:
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = UniSpeechSatConfig()
__lowercase = ''''''
if is_finetuned:
__lowercase = UniSpeechSatForCTC(lowerCamelCase_ )
else:
__lowercase = UniSpeechSatForPreTraining(lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__lowercase = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ )
hf_wavavec.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : list[str] ):
__lowercase = ''''''
for word_or_phrase in separated:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(lowerCamelCase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56
| 0
|
import os
from datetime import datetime as dt
from github import Github
_SCREAMING_SNAKE_CASE = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def _lowerCAmelCase ( ):
__lowercase = Github(os.environ['''GITHUB_TOKEN'''] )
__lowercase = g.get_repo('''huggingface/accelerate''' )
__lowercase = repo.get_issues(state='''open''' )
for issue in open_issues:
__lowercase = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCamelCase_ : i.created_at , reverse=lowerCamelCase_ )
__lowercase = comments[0] if len(lowerCamelCase_ ) > 0 else None
__lowercase = dt.utcnow()
__lowercase = (current_time - issue.updated_at).days
__lowercase = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
| 0
|
from math import sqrt
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
__lowercase = True
# 0 and 1 are none primes.
if number <= 1:
__lowercase = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowercase = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowercase = list(range(2 , n + 1 ) )
__lowercase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowercase = 0
# filters actual prime numbers.
__lowercase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
__lowercase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
__lowercase = [] # this list will be returns of the function.
# potential prime number factors.
__lowercase = 2
__lowercase = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase_ )
__lowercase = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase_ )
__lowercase = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
__lowercase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowercase = get_prime_numbers(lowerCamelCase_ )
__lowercase = len(lowerCamelCase_ )
# run variable for while-loops.
__lowercase = 0
__lowercase = None
# exit variable. for break up the loops
__lowercase = True
while i < len_pn and loop:
__lowercase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowercase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowercase = 0
while numbera != 0:
__lowercase = numbera % numbera
__lowercase = numbera
__lowercase = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowercase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowercase = prime_factorization(lowerCamelCase_ )
__lowercase = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
__lowercase = []
__lowercase = []
__lowercase = max(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = 0
__lowercase = 0
__lowercase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowercase = prime_fac_a.count(lowerCamelCase_ )
__lowercase = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
__lowercase = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowercase = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : str ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
__lowercase = 0
__lowercase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ):
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowercase = p_number_a + 1 # jump to the next number
__lowercase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
__lowercase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
__lowercase = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowercase = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
__lowercase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
__lowercase = 0
__lowercase = 1
__lowercase = 1 # this will be return
for _ in range(n - 1 ):
__lowercase = ans
ans += fiba
__lowercase = tmp
return ans
| 705
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 0
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_SCREAMING_SNAKE_CASE = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = True ,) -> Optional[int]:
'''simple docstring'''
__lowercase = [file for file in os.listdir(_lowerCamelCase ) if os.path.isfile(os.path.join(_lowerCamelCase ,_lowerCamelCase ) )]
if identifier is not None:
__lowercase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
for n_ in n_identifier:
__lowercase = [file for file in files if n_ not in file]
else:
__lowercase = [file for file in files if n_identifier not in file]
__lowercase = ignore_files or []
ignore_files.append('''__init__.py''' )
__lowercase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' ,_lowerCamelCase )
if only_modules:
__lowercase = file.split('''.''' )[0]
try:
__lowercase = getattr(_lowerCamelCase ,_lowerCamelCase )
__lowercase = doctest.DocTestSuite(_lowerCamelCase )
__lowercase = unittest.TextTestRunner().run(_lowerCamelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(f"{module_identifier} is not a module." )
else:
__lowercase = doctest.testfile(str('''..''' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = '''modeling'''
__lowercase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(_lowerCamelCase ,identifier=_lowerCamelCase ,ignore_files=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = '''tokenization'''
self.analyze_directory(_lowerCamelCase ,identifier=_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = '''configuration'''
self.analyze_directory(_lowerCamelCase ,identifier=_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(_lowerCamelCase ,n_identifier=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = Path('''docs/source''' )
__lowercase = ['''favicon.ico''']
self.analyze_directory(_lowerCamelCase ,ignore_files=_lowerCamelCase ,only_modules=_lowerCamelCase )
| 706
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
'''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 0
|
def _lowerCAmelCase ( lowerCamelCase_ : int = 1_0_0_0_0_0_0 ):
__lowercase = set(range(3 , lowerCamelCase_ , 2 ) )
primes.add(2 )
for p in range(3 , lowerCamelCase_ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowerCamelCase_ , lowerCamelCase_ ) ) )
__lowercase = [float(lowerCamelCase_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 708
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
_SCREAMING_SNAKE_CASE = TypeVar('''T''')
_SCREAMING_SNAKE_CASE = Union[List[T], Tuple[T, ...]]
_SCREAMING_SNAKE_CASE = Union[T, List[T], Dict[str, T]]
_SCREAMING_SNAKE_CASE = Union[str, bytes, os.PathLike]
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = "masked_bert"
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=0 ,_lowerCamelCase="topK" ,_lowerCamelCase="constant" ,_lowerCamelCase=0.0 ,**_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = pruning_method
__lowercase = mask_init
__lowercase = mask_scale
| 710
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 0
|
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = [int(lowerCamelCase_ ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
return len(lowerCamelCase_ ) == 4 and all(0 <= int(lowerCamelCase_ ) <= 2_5_4 for octet in octets )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input().strip()
_SCREAMING_SNAKE_CASE = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 711
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
'''simple docstring'''
__lowercase = len(lowerCamelCase_ )
for i in range(length - 1 ):
__lowercase = i
for k in range(i + 1 , lowerCamelCase_ ):
if collection[k] < collection[least]:
__lowercase = k
if least != i:
__lowercase , __lowercase = (collection[i], collection[least])
return collection
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input('''Enter numbers separated by a comma:\n''').strip()
_SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 712
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 713
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[Any] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> str:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : List[str] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = ["flax"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''flax'''] )
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 715
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCAmelCase ( lowerCamelCase_ : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , lowerCamelCase_ , )
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
__lowercase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__lowercase , __lowercase = image[0].size
__lowercase , __lowercase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowercase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__lowercase = np.concatenate(lowerCamelCase_ , axis=0 )
__lowercase = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_5_5.0
__lowercase = image.transpose(0 , 3 , 1 , 2 )
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(lowerCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
__lowercase = torch.cat(lowerCamelCase_ , dim=0 )
return image
def _lowerCAmelCase ( lowerCamelCase_ : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(lowerCamelCase_ , torch.Tensor ):
return mask
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
__lowercase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__lowercase , __lowercase = mask[0].size
__lowercase , __lowercase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
__lowercase = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__lowercase = np.concatenate(lowerCamelCase_ , axis=0 )
__lowercase = mask.astype(np.floataa ) / 2_5_5.0
__lowercase = 0
__lowercase = 1
__lowercase = torch.from_numpy(lowerCamelCase_ )
elif isinstance(mask[0] , torch.Tensor ):
__lowercase = torch.cat(lowerCamelCase_ , dim=0 )
return mask
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : UNetaDModel
a : RePaintScheduler
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = 250 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = 10 ,_lowerCamelCase = 10 ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__lowercase = image
__lowercase = _preprocess_image(_lowerCamelCase )
__lowercase = original_image.to(device=self.device ,dtype=self.unet.dtype )
__lowercase = _preprocess_mask(_lowerCamelCase )
__lowercase = mask_image.to(device=self.device ,dtype=self.unet.dtype )
__lowercase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_lowerCamelCase ,_lowerCamelCase ) and len(_lowerCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
__lowercase = original_image.shape
__lowercase = randn_tensor(_lowerCamelCase ,generator=_lowerCamelCase ,device=self.device ,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,self.device )
__lowercase = eta
__lowercase = self.scheduler.timesteps[0] + 1
__lowercase = generator[0] if isinstance(_lowerCamelCase ,_lowerCamelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ).sample
# compute previous image: x_t -> x_t-1
__lowercase = self.scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowercase = self.scheduler.undo_step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
__lowercase = t
__lowercase = (image / 2 + 0.5).clamp(0 ,1 )
__lowercase = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "open-llama"
def __init__(self ,_lowerCamelCase=100000 ,_lowerCamelCase=4096 ,_lowerCamelCase=11008 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase="silu" ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-6 ,_lowerCamelCase=True ,_lowerCamelCase=0 ,_lowerCamelCase=1 ,_lowerCamelCase=2 ,_lowerCamelCase=False ,_lowerCamelCase=True ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = rms_norm_eps
__lowercase = use_cache
__lowercase = kwargs.pop(
'''use_memorry_efficient_attention''' ,_lowerCamelCase )
__lowercase = hidden_dropout_prob
__lowercase = attention_dropout_prob
__lowercase = use_stable_embedding
__lowercase = shared_input_output_embedding
__lowercase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,tie_word_embeddings=_lowerCamelCase ,**_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"got {self.rope_scaling}" )
__lowercase = self.rope_scaling.get('''type''' ,_lowerCamelCase )
__lowercase = self.rope_scaling.get('''factor''' ,_lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase ,_lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 717
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = '''Normal'''
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = '''Abnormality detected'''
| 56
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Optional[Any] = OpenAIGPTTokenizer
a : int = OpenAIGPTTokenizerFast
a : Tuple = True
a : Any = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file ,'''w''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return "lower newer", "lower newer"
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
__lowercase = '''lower'''
__lowercase = ['''low''', '''er</w>''']
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = tokens + ['''<unk>''']
__lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_lowerCamelCase ,tokenizer_r.encode ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )
# Simple input
self.assertRaises(_lowerCamelCase ,tokenizer_r.encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )
# Simple input
self.assertRaises(
_lowerCamelCase ,tokenizer_r.batch_encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' ,)
# Pair input
self.assertRaises(_lowerCamelCase ,tokenizer_r.encode ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )
# Pair input
self.assertRaises(_lowerCamelCase ,tokenizer_r.encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )
# Pair input
self.assertRaises(
_lowerCamelCase ,tokenizer_r.batch_encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' ,)
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
pass
| 718
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 0
|
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ):
__lowercase = 1_0
__lowercase = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
__lowercase = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [9_7], '''text''': ['''1976''']}] * 1_0,
'''id''': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
_SCREAMING_SNAKE_CASE = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
__lowercase = FILE_CONTENT
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Dict ):
import bza
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
__lowercase = bytes(lowerCamelCase_ , '''utf-8''' )
with bza.open(lowerCamelCase_ , '''wb''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
import gzip
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
__lowercase = bytes(lowerCamelCase_ , '''utf-8''' )
with gzip.open(lowerCamelCase_ , '''wb''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Dict ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
__lowercase = bytes(lowerCamelCase_ , '''utf-8''' )
with lza.frame.open(lowerCamelCase_ , '''wb''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
with pyazr.SevenZipFile(lowerCamelCase_ , '''w''' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ):
import tarfile
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(lowerCamelCase_ , '''w''' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
import lzma
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
__lowercase = bytes(lowerCamelCase_ , '''utf-8''' )
with lzma.open(lowerCamelCase_ , '''wb''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] ):
import zipfile
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
__lowercase = bytes(lowerCamelCase_ , '''utf-8''' )
with zstd.open(lowerCamelCase_ , '''wb''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
__lowercase = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return filename
_SCREAMING_SNAKE_CASE = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
_SCREAMING_SNAKE_CASE = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
_SCREAMING_SNAKE_CASE = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
_SCREAMING_SNAKE_CASE = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
_SCREAMING_SNAKE_CASE = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = datasets.Dataset.from_dict(lowerCamelCase_ )
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con:
__lowercase = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(lowerCamelCase_ , '''w''' , newline='''''' ) as f:
__lowercase = csv.DictWriter(lowerCamelCase_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(lowerCamelCase_ , '''w''' , newline='''''' ) as f:
__lowercase = csv.DictWriter(lowerCamelCase_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
import bza
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(lowerCamelCase_ , '''rb''' ) as f:
__lowercase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , '''wb''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : int ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
__lowercase = pa.schema(
{
'''col_1''': pa.string(),
'''col_2''': pa.intaa(),
'''col_3''': pa.floataa(),
} )
with open(lowerCamelCase_ , '''wb''' ) as f:
__lowercase = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
__lowercase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__lowercase = {'''data''': DATA}
with open(lowerCamelCase_ , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__lowercase = {'''data''': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(lowerCamelCase_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(lowerCamelCase_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(lowerCamelCase_ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(lowerCamelCase_ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] ):
import gzip
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(lowerCamelCase_ , '''rb''' ) as orig_file:
with gzip.open(lowerCamelCase_ , '''wb''' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ):
import gzip
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(lowerCamelCase_ , '''rb''' ) as orig_file:
with gzip.open(lowerCamelCase_ , '''wb''' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('''nested''' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(lowerCamelCase_ , '''w''' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(lowerCamelCase_ , '''w''' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('''nested''' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = ['''0''', '''1''', '''2''', '''3''']
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = ['''0''', '''1''', '''2''', '''3''']
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = ['''0''', '''1''', '''2''', '''3''']
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(lowerCamelCase_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Any ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
__lowercase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
__lowercase = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( lowerCamelCase_ : Dict ):
__lowercase = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 1_0 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 1_0 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
return data_dir
| 719
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 56
| 0
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = ["vqvae"]
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase ,mel=_lowerCamelCase ,vqvae=_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_lowerCamelCase ) else 1000
@torch.no_grad()
def __call__(self ,_lowerCamelCase = 1 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = 0 ,_lowerCamelCase = 0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = 0 ,_lowerCamelCase = 0 ,_lowerCamelCase = None ,_lowerCamelCase = 0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=True ,) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCamelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_lowerCamelCase ,device=self.device ,)
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCamelCase ,_lowerCamelCase )
__lowercase = self.mel.audio_slice_to_image(_lowerCamelCase )
__lowercase = np.frombuffer(input_image.tobytes() ,dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 255) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_lowerCamelCase ,0 ) ).latent_dist.sample(
generator=_lowerCamelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_lowerCamelCase ,_lowerCamelCase ,self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_lowerCamelCase ,_lowerCamelCase ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_lowerCamelCase ):
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )['''sample''']
else:
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase )['''sample''']
if isinstance(self.scheduler ,_lowerCamelCase ):
__lowercase = self.scheduler.step(
model_output=_lowerCamelCase ,timestep=_lowerCamelCase ,sample=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,)['''prev_sample''']
else:
__lowercase = self.scheduler.step(
model_output=_lowerCamelCase ,timestep=_lowerCamelCase ,sample=_lowerCamelCase ,generator=_lowerCamelCase ,)['''prev_sample''']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_lowerCamelCase )['''sample''']
__lowercase = (images / 2 + 0.5).clamp(0 ,1 )
__lowercase = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
__lowercase = (images * 255).round().astype('''uint8''' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_lowerCamelCase ,mode='''RGB''' ).convert('''L''' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCamelCase )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_lowerCamelCase ) )
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler ,_lowerCamelCase )
self.scheduler.set_timesteps(_lowerCamelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() ,dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 255) * 2 - 1
__lowercase = torch.Tensor(_lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase )['''sample''']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> torch.Tensor:
'''simple docstring'''
__lowercase = acos(torch.dot(torch.flatten(_lowerCamelCase ) ,torch.flatten(_lowerCamelCase ) ) / torch.norm(_lowerCamelCase ) / torch.norm(_lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowerCamelCase ) + sin(alpha * theta ) * xa / sin(_lowerCamelCase )
| 720
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 0
|
'''simple docstring'''
from math import sqrt
def _lowerCAmelCase ( lowerCamelCase_ : int = 1_0_0_0_0_0_0 ):
__lowercase = 0
__lowercase = 0
__lowercase = 4_2
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowerCamelCase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 721
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 0
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '''T5Config'''
def _lowerCAmelCase ( lowerCamelCase_ : jnp.array , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase = jnp.zeros_like(lowerCamelCase_ )
__lowercase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__lowercase = shifted_input_ids.at[:, 0].set(lowerCamelCase_ )
__lowercase = jnp.where(shifted_input_ids == -1_0_0 , lowerCamelCase_ , lowerCamelCase_ )
return shifted_input_ids
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = "mt5"
a : Optional[Any] = MTaConfig
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = "mt5"
a : int = MTaConfig
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = "mt5"
a : Tuple = MTaConfig
| 700
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
'''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 56
| 0
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : str = PriorTransformer
a : Optional[Any] = "hidden_states"
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = 4
__lowercase = 8
__lowercase = 7
__lowercase = floats_tensor((batch_size, embedding_dim) ).to(_lowerCamelCase )
__lowercase = floats_tensor((batch_size, embedding_dim) ).to(_lowerCamelCase )
__lowercase = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _UpperCAmelCase (self ,_lowerCamelCase=0 ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(_lowerCamelCase )
__lowercase = 4
__lowercase = 8
__lowercase = 7
__lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
__lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
__lowercase = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return (4, 8)
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return (4, 8)
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' ,output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(_lowerCamelCase )
__lowercase = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.model_class(**_lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
__lowercase = model.to(_lowerCamelCase )
if hasattr(_lowerCamelCase ,'''set_default_attn_processor''' ):
model.set_default_attn_processor()
__lowercase = self.get_dummy_seed_input()
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )[0]
__lowercase = output[0, :5].flatten().cpu()
print(_lowerCamelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowercase = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_lowerCamelCase ,_lowerCamelCase ,rtol=1E-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase=1 ,_lowerCamelCase=768 ,_lowerCamelCase=77 ,_lowerCamelCase=0 ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(_lowerCamelCase )
__lowercase = batch_size
__lowercase = embedding_dim
__lowercase = num_embeddings
__lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
__lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
__lowercase = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' ,subfolder='''prior''' )
model.to(_lowerCamelCase )
__lowercase = self.get_dummy_seed_input(seed=_lowerCamelCase )
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )[0]
assert list(sample.shape ) == [1, 768]
__lowercase = sample[0, :8].flatten().cpu()
print(_lowerCamelCase )
__lowercase = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 )
| 701
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
| 56
| 0
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_SCREAMING_SNAKE_CASE = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any]=None ):
require_version(deps[pkg] , lowerCamelCase_ )
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : Union[tf.Tensor, np.ndarray] ):
if isinstance(lowerCamelCase_ , np.ndarray ):
return list(tensor.shape )
__lowercase = tf.shape(lowerCamelCase_ )
if tensor.shape == tf.TensorShape(lowerCamelCase_ ):
return dynamic
__lowercase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowerCamelCase_ )]
def _lowerCAmelCase ( lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=lowerCamelCase_ , name=lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Optional[int]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
__lowercase , __lowercase = tf.nn.moments(lowerCamelCase_ , axes=[axis] , keepdims=lowerCamelCase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__lowercase = [1] * inputs.shape.rank
__lowercase = shape_list(lowerCamelCase_ )[axis]
__lowercase = tf.reshape(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = tf.reshape(lowerCamelCase_ , lowerCamelCase_ )
# Compute layer normalization using the batch_normalization
# function.
__lowercase = tf.nn.batch_normalization(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , offset=lowerCamelCase_ , scale=lowerCamelCase_ , variance_epsilon=lowerCamelCase_ , )
return outputs
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : Any=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__lowercase = tf.shape(lowerCamelCase_ )
__lowercase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__lowercase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : tf.Tensor ):
if not isinstance(lowerCamelCase_ , tf.Tensor ):
__lowercase = tf.convert_to_tensor(lowerCamelCase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__lowercase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__lowercase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__lowercase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _lowerCAmelCase ( lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : int , lowerCamelCase_ : str = "input_ids" ):
tf.debugging.assert_less(
lowerCamelCase_ , tf.cast(lowerCamelCase_ , dtype=tensor.dtype ) , message=(
f"The maximum value of {tensor_name} ({tf.math.reduce_max(lowerCamelCase_ )}) must be smaller than the embedding "
f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] ):
__lowercase = 6_4_5_1_2
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__lowercase = [x for x in data if len(lowerCamelCase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
f"bytes: {bad_attributes}" )
__lowercase = np.asarray(lowerCamelCase_ )
__lowercase = 1
__lowercase = np.array_split(lowerCamelCase_ , lowerCamelCase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__lowercase = np.array_split(lowerCamelCase_ , lowerCamelCase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowerCamelCase_ ):
__lowercase = chunk_data
else:
__lowercase = data
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ):
if name in group.attrs:
__lowercase = [n.decode('''utf8''' ) if hasattr(lowerCamelCase_ , '''decode''' ) else n for n in group.attrs[name]]
else:
__lowercase = []
__lowercase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(lowerCamelCase_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
def _expand_single_ad_tensor(lowerCamelCase_ : Tuple ):
if isinstance(lowerCamelCase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowerCamelCase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowerCamelCase_ )
| 703
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _lowerCAmelCase ( lowerCamelCase_ : Dict ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _lowerCAmelCase ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
__lowercase = [1, 2, 3]
with pytest.raises(lowerCamelCase_ ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=2 )
with pytest.raises(lowerCamelCase_ ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
__lowercase = [1, 2]
__lowercase = {'''a''': 1, '''b''': 2}
__lowercase = {'''a''': [1, 2], '''b''': [3, 4]}
__lowercase = {'''a''': {'''1''': 1}, '''b''': 2}
__lowercase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__lowercase = [2, 3]
__lowercase = {'''a''': 2, '''b''': 3}
__lowercase = {'''a''': [2, 3], '''b''': [4, 5]}
__lowercase = {'''a''': {'''1''': 2}, '''b''': 3}
__lowercase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
| 0
|
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 705
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = "instructblip_vision_model"
def __init__(self ,_lowerCamelCase=1408 ,_lowerCamelCase=6144 ,_lowerCamelCase=39 ,_lowerCamelCase=16 ,_lowerCamelCase=224 ,_lowerCamelCase=14 ,_lowerCamelCase="gelu" ,_lowerCamelCase=1E-6 ,_lowerCamelCase=0.0 ,_lowerCamelCase=1E-1_0 ,_lowerCamelCase=True ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = qkv_bias
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "instructblip_qformer"
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=0 ,_lowerCamelCase="absolute" ,_lowerCamelCase=2 ,_lowerCamelCase=1408 ,**_lowerCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = cross_attention_frequency
__lowercase = encoder_hidden_size
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
__lowercase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[str] = "instructblip"
a : str = True
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=32 ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
__lowercase = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__lowercase = InstructBlipVisionConfig(**_lowerCamelCase )
__lowercase = InstructBlipQFormerConfig(**_lowerCamelCase )
__lowercase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__lowercase = CONFIG_MAPPING[text_model_type](**_lowerCamelCase )
__lowercase = self.text_config.tie_word_embeddings
__lowercase = self.text_config.is_encoder_decoder
__lowercase = num_query_tokens
__lowercase = self.vision_config.hidden_size
__lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowercase = 1.0
__lowercase = 0.0_2
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.qformer_config.to_dict()
__lowercase = self.text_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 706
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
| 0
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=7 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=99 ,_lowerCamelCase=32 ,_lowerCamelCase=5 ,_lowerCamelCase=4 ,_lowerCamelCase=37 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=50 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=True ,_lowerCamelCase=None ,) -> int:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = use_labels
__lowercase = scope
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=_lowerCamelCase ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
__lowercase = BertGenerationEncoder(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ,) -> Any:
'''simple docstring'''
__lowercase = True
__lowercase = BertGenerationEncoder(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,encoder_hidden_states=_lowerCamelCase ,encoder_attention_mask=_lowerCamelCase ,)
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,encoder_hidden_states=_lowerCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
__lowercase = True
__lowercase = True
__lowercase = BertGenerationDecoder(config=_lowerCamelCase ).to(_lowerCamelCase ).eval()
# first forward pass
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,encoder_hidden_states=_lowerCamelCase ,encoder_attention_mask=_lowerCamelCase ,use_cache=_lowerCamelCase ,)
__lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] ,dim=-1 )
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,encoder_hidden_states=_lowerCamelCase ,encoder_attention_mask=_lowerCamelCase ,output_hidden_states=_lowerCamelCase ,)['''hidden_states'''][0]
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,encoder_hidden_states=_lowerCamelCase ,encoder_attention_mask=_lowerCamelCase ,past_key_values=_lowerCamelCase ,output_hidden_states=_lowerCamelCase ,)['''hidden_states'''][0]
# select random slice
__lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
__lowercase = BertGenerationDecoder(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a : Union[str, Any] = (BertGenerationDecoder,) if is_torch_available() else ()
a : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = BertGenerationEncoderTester(self )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase ,hidden_size=37 )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
__lowercase = '''bert'''
self.model_tester.create_and_check_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(_lowerCamelCase )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__lowercase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__lowercase = model(_lowerCamelCase )[0]
__lowercase = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,_lowerCamelCase )
__lowercase = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_lowerCamelCase ,atol=1E-4 ) )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__lowercase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__lowercase = model(_lowerCamelCase )[0]
__lowercase = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape ,_lowerCamelCase )
__lowercase = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_lowerCamelCase ,atol=1E-4 ) )
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 0
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
| 708
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
import random
class __lowercase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> tuple[list[int], list[int]]:
'''simple docstring'''
__lowercase = [ord(_lowerCamelCase ) for i in text]
__lowercase = []
__lowercase = []
for i in plain:
__lowercase = random.randint(1 ,300 )
__lowercase = (i + k) * k
cipher.append(_lowerCamelCase )
key.append(_lowerCamelCase )
return cipher, key
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = []
for i in range(len(_lowerCamelCase ) ):
__lowercase = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_lowerCamelCase ) )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
| 710
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
self.check_model_type(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase = {}, {}
if padding is not None:
__lowercase = padding
if truncation is not None:
__lowercase = truncation
if top_k is not None:
__lowercase = top_k
return preprocess_params, {}, postprocess_params
def __call__(self ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> str:
'''simple docstring'''
if isinstance(_lowerCamelCase ,(Image.Image, str) ) and isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = {'''image''': image, '''question''': question}
else:
__lowercase = image
__lowercase = super().__call__(_lowerCamelCase ,**_lowerCamelCase )
return results
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=False ,_lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
__lowercase = load_image(inputs['''image'''] )
__lowercase = self.tokenizer(
inputs['''question'''] ,return_tensors=self.framework ,padding=_lowerCamelCase ,truncation=_lowerCamelCase )
__lowercase = self.image_processor(images=_lowerCamelCase ,return_tensors=self.framework )
model_inputs.update(_lowerCamelCase )
return model_inputs
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = self.model(**_lowerCamelCase )
return model_outputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=5 ) -> Tuple:
'''simple docstring'''
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.sigmoid()[0]
__lowercase , __lowercase = probs.topk(_lowerCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCamelCase ,_lowerCamelCase )]
| 711
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 0
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ):
'''simple docstring'''
__lowercase = HfArgumentParser(lowerCamelCase_ )
__lowercase = parser.parse_args_into_dataclasses()[0]
__lowercase = TensorFlowBenchmark(args=lowerCamelCase_ )
try:
__lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowercase = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__lowercase = ''' '''.join(str(lowerCamelCase_ ).split(''' ''' )[:-1] )
__lowercase = ''''''
__lowercase = eval(str(lowerCamelCase_ ).split(''' ''' )[-1] )
__lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
__lowercase = full_error_msg + begin_error_msg + str(lowerCamelCase_ )
raise ValueError(lowerCamelCase_ )
benchmark.run()
if __name__ == "__main__":
main()
| 712
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 0
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return TrainCommand(lowerCamelCase_ )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = parser.add_parser('''train''' ,help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' ,)
train_parser.add_argument(
'''--column_label''' ,type=_lowerCamelCase ,default=0 ,help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' ,type=_lowerCamelCase ,default=1 ,help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' ,type=_lowerCamelCase ,default=2 ,help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' ,action='''store_true''' ,help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' ,type=_lowerCamelCase ,default='''''' ,help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' ,type=_lowerCamelCase ,default=0.1 ,help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' ,)
train_parser.add_argument('''--output''' ,type=_lowerCamelCase ,default='''./''' ,help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' ,type=_lowerCamelCase ,default='''text_classification''' ,help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' ,type=_lowerCamelCase ,default='''bert-base-uncased''' ,help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' ,type=_lowerCamelCase ,default=32 ,help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' ,type=_lowerCamelCase ,default=64 ,help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' ,type=_lowerCamelCase ,default=3E-5 ,help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' ,type=_lowerCamelCase ,default=1E-0_8 ,help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = logging.get_logger('''transformers-cli/training''' )
__lowercase = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output ,exist_ok=_lowerCamelCase )
__lowercase = args.output
__lowercase = args.column_label
__lowercase = args.column_text
__lowercase = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
__lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}" )
__lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
__lowercase = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}" )
__lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
__lowercase = args.validation_split
__lowercase = args.train_batch_size
__lowercase = args.valid_batch_size
__lowercase = args.learning_rate
__lowercase = args.adam_epsilon
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 713
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Tuple = CpmAntTokenizer
a : Union[str, Any] = False
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
super().setUp()
__lowercase = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
__lowercase = '''今天天气真好!'''
__lowercase = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = '''今天天气真好!'''
__lowercase = [tokenizer.bos_token] + tokens
__lowercase = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,_lowerCamelCase )
__lowercase = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
try:
with open(lowerCamelCase_ , '''rb''' ) as flax_state_f:
__lowercase = from_bytes(lowerCamelCase_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(lowerCamelCase_ ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__lowercase = flatten_dict(jax.tree_util.tree_map(lambda lowerCamelCase_ : x.dtype == jnp.bfloataa , lowerCamelCase_ ) ).values()
if any(lowerCamelCase_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__lowercase = jax.tree_util.tree_map(
lambda lowerCamelCase_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCamelCase_ )
__lowercase = ''''''
__lowercase = flatten_dict(lowerCamelCase_ , sep='''.''' )
__lowercase = pt_model.state_dict()
# keep track of unexpected & missing keys
__lowercase = []
__lowercase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowercase = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowercase = flax_key_tuple_array[:-1] + ['''weight''']
__lowercase = jnp.transpose(lowerCamelCase_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowercase = flax_key_tuple_array[:-1] + ['''weight''']
__lowercase = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowercase = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowerCamelCase_ ):
__lowercase = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__lowercase = '''.'''.join(lowerCamelCase_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
__lowercase = np.asarray(lowerCamelCase_ ) if not isinstance(lowerCamelCase_ , np.ndarray ) else flax_tensor
__lowercase = torch.from_numpy(lowerCamelCase_ )
# remove from missing keys
missing_keys.remove(lowerCamelCase_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCamelCase_ )
pt_model.load_state_dict(lowerCamelCase_ )
# re-transform missing_keys to list
__lowercase = list(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(lowerCamelCase_ ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
return pt_model
| 715
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[int] | None = None ):
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
__lowercase = nums[0]
for i in range(1 , len(lowerCamelCase_ ) ):
__lowercase = nums[i]
__lowercase = max(lowerCamelCase_ , ans + num , lowerCamelCase_ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_SCREAMING_SNAKE_CASE : Optional[Any] = int(input('''Enter number of elements : ''').strip())
_SCREAMING_SNAKE_CASE : Dict = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = '''Normal'''
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = '''Abnormality detected'''
| 56
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = "convbert"
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase=768 ,_lowerCamelCase=2 ,_lowerCamelCase=9 ,_lowerCamelCase=1 ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = embedding_size
__lowercase = head_ratio
__lowercase = conv_kernel_size
__lowercase = num_groups
__lowercase = classifier_dropout
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 718
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 719
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def _lowerCAmelCase ( lowerCamelCase_ : str ):
return choice(lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ):
__lowercase = random_pivot(lowerCamelCase_ )
# partition based on pivot
# linear time
__lowercase = [e for e in lst if e < pivot]
__lowercase = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCamelCase_ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCamelCase_ ) < k - 1:
return kth_number(lowerCamelCase_ , k - len(lowerCamelCase_ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _lowerCAmelCase ( lowerCamelCase_ : list ):
if not postfix_notation:
return 0
__lowercase = {'''+''', '''-''', '''*''', '''/'''}
__lowercase = []
for token in postfix_notation:
if token in operations:
__lowercase , __lowercase = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowerCamelCase_ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : list , lowerCamelCase_ : int , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 0 ):
__lowercase = right or len(lowerCamelCase_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowerCamelCase_ , lowerCamelCase_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
'''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 56
| 0
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = VOCAB_FILES_NAMES
a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = VOCAB_FILES_NAMES
a : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
_SCREAMING_SNAKE_CASE = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
_SCREAMING_SNAKE_CASE = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(lowerCAmelCase__ )
class __lowercase :
'''simple docstring'''
def __call__(self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = False ,_lowerCamelCase = False ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase ,return_tensors=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,**_lowerCamelCase ,)
elif titles is None or texts is None:
__lowercase = titles if texts is None else texts
return super().__call__(
_lowerCamelCase ,_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase ,return_tensors=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = titles if not isinstance(_lowerCamelCase ,_lowerCamelCase ) else [titles]
__lowercase = texts if not isinstance(_lowerCamelCase ,_lowerCamelCase ) else [texts]
__lowercase = len(_lowerCamelCase )
__lowercase = questions if not isinstance(_lowerCamelCase ,_lowerCamelCase ) else [questions] * n_passages
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
f"There should be as many titles than texts but got {len(_lowerCamelCase )} titles and {len(_lowerCamelCase )} texts." )
__lowercase = super().__call__(_lowerCamelCase ,_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase )['''input_ids''']
__lowercase = super().__call__(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase )['''input_ids''']
__lowercase = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCamelCase ,_lowerCamelCase )
]
}
if return_attention_mask is not False:
__lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowercase = attention_mask
return self.pad(_lowerCamelCase ,padding=_lowerCamelCase ,max_length=_lowerCamelCase ,return_tensors=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = 16 ,_lowerCamelCase = 64 ,_lowerCamelCase = 4 ,) -> List[DPRSpanPrediction]:
'''simple docstring'''
__lowercase = reader_input['''input_ids''']
__lowercase , __lowercase , __lowercase = reader_output[:3]
__lowercase = len(_lowerCamelCase )
__lowercase = sorted(range(_lowerCamelCase ) ,reverse=_lowerCamelCase ,key=relevance_logits.__getitem__ )
__lowercase = []
for doc_id in sorted_docs:
__lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowercase = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowercase = sequence_ids.index(self.pad_token_id )
else:
__lowercase = len(_lowerCamelCase )
__lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_lowerCamelCase ,top_spans=_lowerCamelCase ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_lowerCamelCase ,start_index=_lowerCamelCase ,end_index=_lowerCamelCase ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> List[DPRSpanPrediction]:
'''simple docstring'''
__lowercase = []
for start_index, start_score in enumerate(_lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowercase = sorted(_lowerCamelCase ,key=lambda _lowerCamelCase : x[1] ,reverse=_lowerCamelCase )
__lowercase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
__lowercase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCAmelCase__ )
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = VOCAB_FILES_NAMES
a : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
a : int = ["input_ids", "attention_mask"]
| 701
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
| 56
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_SCREAMING_SNAKE_CASE = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ) -> Optional[int]:
'''simple docstring'''
__lowercase = None
__lowercase = os.path.abspath(os.path.join('''examples''' ,'''by_feature''' ) )
__lowercase = os.path.abspath('''examples''' )
for item in os.listdir(_lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if os.path.isfile(_lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=_lowerCamelCase ,feature_script=_lowerCamelCase ,tested_section='''main()''' if parser_only else '''training_function()''' ,):
__lowercase = compare_against_test(
os.path.join(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
__lowercase = '''\n'''.join(_lowerCamelCase )
if special_strings is not None:
for string in special_strings:
__lowercase = diff.replace(_lowerCamelCase ,'''''' )
self.assertEqual(_lowerCamelCase ,'''''' )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
self.one_complete_example('''complete_nlp_example.py''' ,_lowerCamelCase )
self.one_complete_example('''complete_nlp_example.py''' ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = os.path.abspath(os.path.join('''examples''' ,'''cv_example.py''' ) )
__lowercase = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
self.one_complete_example('''complete_cv_example.py''' ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = False
@classmethod
def _UpperCAmelCase (cls ) -> Union[str, Any]:
'''simple docstring'''
super().setUpClass()
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(cls._tmpdir ,'''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__lowercase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _UpperCAmelCase (cls ) -> Union[str, Any]:
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,'''epoch_0''' ) ) )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
__lowercase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,'''step_2''' ) ) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir ,'epoch_0' )}\n ".split()
__lowercase = run_command(self._launch_args + testargs ,return_stdout=_lowerCamelCase )
self.assertNotIn('''epoch 0:''' ,_lowerCamelCase )
self.assertIn('''epoch 1:''' ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir ,'step_2' )}\n ".split()
__lowercase = run_command(self._launch_args + testargs ,return_stdout=_lowerCamelCase )
if torch.cuda.is_available():
__lowercase = torch.cuda.device_count()
else:
__lowercase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' ,_lowerCamelCase )
self.assertIn('''epoch 1:''' ,_lowerCamelCase )
else:
self.assertIn('''epoch 0:''' ,_lowerCamelCase )
self.assertIn('''epoch 1:''' ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ ,{'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__lowercase = run_command(self._launch_args + testargs ,return_stdout=_lowerCamelCase )
__lowercase = re.findall('''({.+})''' ,_lowerCamelCase )
__lowercase = [r for r in results if '''accuracy''' in r][-1]
__lowercase = ast.literal_eval(_lowerCamelCase )
self.assertGreaterEqual(results['''accuracy'''] ,0.7_5 )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
__lowercase = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase ,'''tracking''' ) ) )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __lowercase :
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCamelCase ,_lowerCamelCase ,f"Difference between torch and flax is {diff} (>= {tol})." )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase ,_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.get_vision_text_model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.get_vision_text_model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = after_output[0]
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase ,1E-3 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase = self.get_vision_text_model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase )
__lowercase = model(
input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase ,output_attentions=_lowerCamelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCamelCase ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCamelCase ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
pt_model.to(_lowerCamelCase )
pt_model.eval()
# prepare inputs
__lowercase = inputs_dict
__lowercase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase = pt_model(**_lowerCamelCase ).to_tuple()
__lowercase = fx_model(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(_lowerCamelCase ,pt_output.numpy() ,4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase ,from_pt=_lowerCamelCase )
__lowercase = fx_model_loaded(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(_lowerCamelCase ,pt_output.numpy() ,4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowerCamelCase )
__lowercase = VisionTextDualEncoderModel.from_pretrained(_lowerCamelCase ,from_flax=_lowerCamelCase )
pt_model_loaded.to(_lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase = pt_model_loaded(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
self.assert_almost_equals(_lowerCamelCase ,pt_output_loaded.numpy() ,4E-2 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase ,_lowerCamelCase )
__lowercase = VisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,_lowerCamelCase )
__lowercase = fx_state
self.check_pt_flax_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase ,_lowerCamelCase )
__lowercase = VisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = load_flax_weights_in_pytorch_model(_lowerCamelCase ,fx_model.params )
self.check_pt_flax_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCamelCase )
@is_pt_flax_cross_test
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase = config_inputs_dict.pop('''vision_config''' )
__lowercase = config_inputs_dict.pop('''text_config''' )
__lowercase = config_inputs_dict
self.check_equivalence_pt_to_flax(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
self.check_equivalence_flax_to_pt(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCamelCase )
__lowercase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase )
__lowercase = model_a(**_lowerCamelCase )
__lowercase = after_outputs[0]
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase ,1E-5 )
@require_flax
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=_lowerCamelCase ,text_from_pt=_lowerCamelCase ,)
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = FlaxViTModel(_lowerCamelCase )
__lowercase = FlaxBertModel(_lowerCamelCase )
return vision_model, text_model
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = FlaxViTModelTester(self )
__lowercase = FlaxBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=_lowerCamelCase ,text_from_pt=_lowerCamelCase ,)
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = FlaxCLIPVisionModel(_lowerCamelCase )
__lowercase = FlaxBertModel(_lowerCamelCase )
return vision_model, text_model
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = FlaxCLIPVisionModelTester(self )
__lowercase = FlaxBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowercase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors='''np''' )
__lowercase = model(**_lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
__lowercase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image ,_lowerCamelCase ,atol=1E-3 ) )
| 703
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features ,_lowerCamelCase ,bias=_lowerCamelCase ) ,nn.Linear(_lowerCamelCase ,module.out_features ,bias=_lowerCamelCase ) ,)
__lowercase = (2.0 / (5 * min(module.in_features ,module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight ,std=_lowerCamelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _UpperCAmelCase (self ,_lowerCamelCase ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return self.module(_lowerCamelCase ,*_lowerCamelCase ,**_lowerCamelCase ) + self.adapter(_lowerCamelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
a : Any = "bigscience/bloom-1b7"
# Constant values
a : str = 2.1_09_65_95_52_69_25_74
a : Tuple = "Hello my name is"
a : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
a : List[str] = 10
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,torch_dtype=torch.floataa ,device_map='''auto''' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_lowerCamelCase ,'''quantization_config''' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit ,self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_lowerCamelCase ,torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=_lowerCamelCase ) ,self.EXPECTED_OUTPUTS )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,quantization_config=_lowerCamelCase ,device_map='''auto''' )
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=_lowerCamelCase ) ,self.EXPECTED_OUTPUTS )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
with self.assertRaises(_lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_lowerCamelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,quantization_config=_lowerCamelCase ,load_in_abit=_lowerCamelCase ,device_map='''auto''' ,bnb_abit_quant_type='''nf4''' ,)
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(_lowerCamelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _UpperCAmelCase (cls ) -> List[Any]:
'''simple docstring'''
__lowercase = '''t5-small'''
__lowercase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = '''Translate in German: Hello, my dog is cute'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 )
__lowercase = model.generate(**_lowerCamelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 )
__lowercase = model.generate(**_lowerCamelCase )
__lowercase = modules
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q ,bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 )
__lowercase = model.generate(**_lowerCamelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 )
__lowercase = model.generate(**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# model_name
__lowercase = '''bigscience/bloom-560m'''
__lowercase = '''t5-small'''
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name ,load_in_abit=_lowerCamelCase ,device_map='''auto''' )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
super().setUp()
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = pipeline(
'''text-generation''' ,model=self.model_name ,model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} ,max_new_tokens=self.MAX_NEW_TOKENS ,)
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] ,self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,load_in_abit=_lowerCamelCase ,device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) ,{0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] ,skip_special_tokens=_lowerCamelCase ) ,self.EXPECTED_OUTPUTS )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = '''facebook/opt-350m'''
super().setUp()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=_lowerCamelCase )
self.assertEqual(set(model.hf_device_map.values() ) ,{torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_lowerCamelCase ) ):
__lowercase = LoRALayer(module.q_proj ,rank=16 )
__lowercase = LoRALayer(module.k_proj ,rank=16 )
__lowercase = LoRALayer(module.v_proj ,rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('''Test batch ''' ,return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_lowerCamelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_lowerCamelCase ,nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "gpt2-xl"
a : Union[str, Any] = 3.31_91_85_48_54_15_21_87
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
| 0
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_SCREAMING_SNAKE_CASE = 8
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple=BITS ):
__lowercase = x.device
__lowercase = (x * 2_5_5).int().clamp(0 , 2_5_5 )
__lowercase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase_ )
__lowercase = rearrange(lowerCamelCase_ , '''d -> d 1 1''' )
__lowercase = rearrange(lowerCamelCase_ , '''b c h w -> b c 1 h w''' )
__lowercase = ((x & mask) != 0).float()
__lowercase = rearrange(lowerCamelCase_ , '''b c d h w -> b (c d) h w''' )
__lowercase = bits * 2 - 1
return bits
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : List[str]=BITS ):
__lowercase = x.device
__lowercase = (x > 0).int()
__lowercase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase_ , dtype=torch.intaa )
__lowercase = rearrange(lowerCamelCase_ , '''d -> d 1 1''' )
__lowercase = rearrange(lowerCamelCase_ , '''b (c d) h w -> b c d h w''' , d=8 )
__lowercase = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 2_5_5).clamp(0.0 , 1.0 )
def _lowerCAmelCase ( self : Tuple , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowercase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowercase = self.alphas_cumprod[timestep]
__lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowercase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowercase = self.bit_scale
if self.config.clip_sample:
__lowercase = torch.clamp(lowerCamelCase_ , -scale , lowerCamelCase_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowercase = self._get_variance(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowercase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowercase = model_output.device if torch.is_tensor(lowerCamelCase_ ) else '''cpu'''
__lowercase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase_ ).to(lowerCamelCase_ )
__lowercase = self._get_variance(lowerCamelCase_ , lowerCamelCase_ ) ** 0.5 * eta * noise
__lowercase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
def _lowerCAmelCase ( self : str , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[int]="epsilon" , lowerCamelCase_ : Dict=None , lowerCamelCase_ : bool = True , ):
__lowercase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowercase , __lowercase = torch.split(lowerCamelCase_ , sample.shape[1] , dim=1 )
else:
__lowercase = None
# 1. compute alphas, betas
__lowercase = self.alphas_cumprod[t]
__lowercase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowercase = 1 - alpha_prod_t
__lowercase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowercase = model_output
else:
raise ValueError(f"Unsupported prediction_type {prediction_type}." )
# 3. Clip "predicted x_0"
__lowercase = self.bit_scale
if self.config.clip_sample:
__lowercase = torch.clamp(lowerCamelCase_ , -scale , lowerCamelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowercase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowercase = 0
if t > 0:
__lowercase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCamelCase_ ).to(model_output.device )
__lowercase = (self._get_variance(lowerCamelCase_ , predicted_variance=lowerCamelCase_ ) ** 0.5) * noise
__lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = 1.0 ,) -> int:
'''simple docstring'''
super().__init__()
__lowercase = bit_scale
__lowercase = (
ddim_bit_scheduler_step if isinstance(_lowerCamelCase ,_lowerCamelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__(self ,_lowerCamelCase = 256 ,_lowerCamelCase = 256 ,_lowerCamelCase = 50 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
__lowercase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=_lowerCamelCase ,)
__lowercase = decimal_to_bits(_lowerCamelCase ) * self.bit_scale
__lowercase = latents.to(self.device )
self.scheduler.set_timesteps(_lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ).prev_sample
__lowercase = bits_to_decimal(_lowerCamelCase )
if output_type == "pil":
__lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
| 705
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 0
|
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = RoFormerTokenizer
a : List[Any] = RoFormerTokenizerFast
a : Tuple = True
a : Any = True
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
super().setUp()
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = '''永和服装饰品有限公司,今天天气非常好'''
__lowercase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase , __lowercase = self.get_chinese_input_output_texts()
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,output_text.split() )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_rust_tokenizer()
__lowercase , __lowercase = self.get_chinese_input_output_texts()
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,output_text.split() )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
| 706
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[str] = "owlvit_text_model"
def __init__(self ,_lowerCamelCase=49408 ,_lowerCamelCase=512 ,_lowerCamelCase=2048 ,_lowerCamelCase=12 ,_lowerCamelCase=8 ,_lowerCamelCase=16 ,_lowerCamelCase="quick_gelu" ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1.0 ,_lowerCamelCase=0 ,_lowerCamelCase=49406 ,_lowerCamelCase=49407 ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = max_position_embeddings
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = "owlvit_vision_model"
def __init__(self ,_lowerCamelCase=768 ,_lowerCamelCase=3072 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3 ,_lowerCamelCase=768 ,_lowerCamelCase=32 ,_lowerCamelCase="quick_gelu" ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1.0 ,**_lowerCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = image_size
__lowercase = patch_size
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = "owlvit"
a : str = True
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=512 ,_lowerCamelCase=2.6_5_9_2 ,_lowerCamelCase=True ,**_lowerCamelCase ,) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
__lowercase = OwlViTTextConfig(**_lowerCamelCase )
__lowercase = OwlViTVisionConfig(**_lowerCamelCase )
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = return_dict
__lowercase = 1.0
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = {}
__lowercase = text_config
__lowercase = vision_config
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
return 1E-4
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = -1 ,_lowerCamelCase = -1 ,_lowerCamelCase = None ,) -> Mapping[str, Any]:
'''simple docstring'''
__lowercase = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_lowerCamelCase ,seq_length=_lowerCamelCase ,framework=_lowerCamelCase )
__lowercase = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_lowerCamelCase ,framework=_lowerCamelCase )
return {**text_input_dict, **image_input_dict}
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return 14
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) ,1 )
self.assertEqual(x.component(2 ) ,3 )
__lowercase = Vector()
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowerCamelCase ) ,'''(0,0,0,0,0,1)''' )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowerCamelCase ) ,4 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2] )
__lowercase = Vector([1, 2, 3, 4, 5] )
__lowercase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__lowercase = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() ,2.2_3_6 ,3 )
self.assertAlmostEqual(y.euclidean_length() ,7.4_1_6 ,3 )
self.assertEqual(z.euclidean_length() ,0 )
self.assertAlmostEqual(w.euclidean_length() ,7.6_1_6 ,3 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2, 3] )
__lowercase = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) ,2 )
self.assertEqual((x + y).component(1 ) ,3 )
self.assertEqual((x + y).component(2 ) ,4 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2, 3] )
__lowercase = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) ,0 )
self.assertEqual((x - y).component(1 ) ,1 )
self.assertEqual((x - y).component(2 ) ,2 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2, 3] )
__lowercase = Vector([2, -1, 4] ) # for test of dot product
__lowercase = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) ,'''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) ,0 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) ,10 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 ,1 ) ) ,'''(0,1,0)''' )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 2, 3] )
__lowercase = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 ,_lowerCamelCase ,_lowerCamelCase ) ) ,'''(3,4,7)''' )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 0, 0, 0, 0, 0] )
__lowercase = x.copy()
self.assertEqual(str(_lowerCamelCase ) ,str(_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Vector([1, 0, 0] )
x.change_component(0 ,0 )
x.change_component(1 ,1 )
self.assertEqual(str(_lowerCamelCase ) ,'''(0,1,0)''' )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' ,str(_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
__lowercase = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] ,a.minor(_lowerCamelCase ,_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
__lowercase = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] ,a.cofactor(_lowerCamelCase ,_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(-5 ,a.determinant() )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ,3 ,3 )
__lowercase = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' ,str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' ,str(a * 2 ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
a.change_component(0 ,2 ,5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' ,str(_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(7 ,a.component(2 ,1 ) ,0.0_1 )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
__lowercase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' ,str(a + b ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
__lowercase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' ,str(a - b ) )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' ,str(square_zero_matrix(5 ) ) ,)
if __name__ == "__main__":
unittest.main()
| 708
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
a : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
a : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
a : List[Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a : Optional[int] = False
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return 100
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__lowercase = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.dummy_unet
__lowercase = self.dummy_movq
__lowercase = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__lowercase = DDIMScheduler(**_lowerCamelCase )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=0 ) -> Any:
'''simple docstring'''
__lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
__lowercase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__lowercase = image.cpu().permute(0 ,2 ,3 ,1 )[0]
__lowercase = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
__lowercase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
__lowercase = torch.manual_seed(_lowerCamelCase )
else:
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__lowercase = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCamelCase )
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
__lowercase = output.images
__lowercase = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) ,return_dict=_lowerCamelCase ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
__lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__lowercase = init_image.resize((512, 512) )
__lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__lowercase = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 255.0
__lowercase = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
__lowercase = '''A robot, 4k photo'''
__lowercase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
__lowercase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' ,torch_dtype=torch.floataa )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase , __lowercase = pipe_prior(
_lowerCamelCase ,image=_lowerCamelCase ,strength=0.8_5 ,generator=_lowerCamelCase ,negative_prompt='''''' ,).to_tuple()
__lowercase = pipeline(
image=_lowerCamelCase ,image_embeds=_lowerCamelCase ,negative_image_embeds=_lowerCamelCase ,hint=_lowerCamelCase ,generator=_lowerCamelCase ,num_inference_steps=100 ,height=512 ,width=512 ,strength=0.5 ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase ,_lowerCamelCase )
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__lowercase = flax_key_tuple[:-1] + ('''weight''',)
__lowercase = torch.permute(lowerCamelCase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCamelCase_ ):
# linear layer
__lowercase = flax_key_tuple[:-1] + ('''weight''',)
__lowercase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowercase = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ):
if "metadata" in layer:
__lowercase = layer.split('''metadata''' )
__lowercase = ''''''.join(split_layer[0] )[:-1]
__lowercase = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__lowercase = layer.split('''kvstore''' )
__lowercase = ''''''.join(split_layer[0] )[:-1]
__lowercase = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__lowercase = layer.split('''/''' )
__lowercase = '''/'''.join(split_layer[:-1] )
__lowercase = (split_layer[-1],)
if "kvstore/path" in layer:
__lowercase = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
__lowercase = '''file'''
else:
__lowercase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
__lowercase = rename_keys(lowerCamelCase_ )
__lowercase = {}
for k, v in current_block.items():
__lowercase = v
__lowercase = new_current_block
torch.save(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str = WEIGHTS_NAME ):
__lowercase = convert_file_size_to_int(lowerCamelCase_ )
__lowercase = []
__lowercase = {}
__lowercase = 0
__lowercase = 0
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__lowercase = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__lowercase = flatten_dict(lowerCamelCase_ , sep='''/''' )
__lowercase = {}
for layer in checkpoint_info.keys():
__lowercase , __lowercase , __lowercase = get_key_and_tensorstore_dict(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if curr_real_layer_name in all_layers:
__lowercase = content
else:
__lowercase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__lowercase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__lowercase = torch.tensor(lowerCamelCase_ )
__lowercase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__lowercase , __lowercase = rename_base_flax_keys(tuple(key.split('''/''' ) ) , lowerCamelCase_ )
__lowercase = '''/'''.join(lowerCamelCase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__lowercase = os.path.join(
lowerCamelCase_ , weights_name.replace('''.bin''' , f"-{len(lowerCamelCase_ )+1:05d}-of-???.bin" ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__lowercase = {}
__lowercase = 0
__lowercase = raw_weights.to(getattr(lowerCamelCase_ , lowerCamelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__lowercase = os.path.join(lowerCamelCase_ , weights_name.replace('''.bin''' , f"-{len(lowerCamelCase_ )+1:05d}-of-???.bin" ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCamelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__lowercase = {}
__lowercase = {}
for idx, shard in enumerate(lowerCamelCase_ ):
__lowercase = weights_name.replace(
'''.bin''' , f"-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin" ) # len(sharded_state_dicts):05d}
__lowercase = os.path.join(lowerCamelCase_ , weights_name.replace('''.bin''' , f"-{idx+1:05d}-of-???.bin" ) )
os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
__lowercase = shard
for key in shard:
__lowercase = shard_file
# Add the metadata
__lowercase = {'''total_size''': total_size}
__lowercase = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , '''w''' , encoding='''utf-8''' ) as f:
__lowercase = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '''\n'''
f.write(lowerCamelCase_ )
return metadata, index
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _lowerCAmelCase ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__lowercase = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__lowercase = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__lowercase = TaTokenizer.from_pretrained('''t5-small''' )
__lowercase = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__lowercase = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
__lowercase = model.generate(lowerCamelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 710
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 0
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase=None ) -> Any:
'''simple docstring'''
if not conversation_id:
__lowercase = uuid.uuida()
if past_user_inputs is None:
__lowercase = []
if generated_responses is None:
__lowercase = []
__lowercase = conversation_id
__lowercase = past_user_inputs
__lowercase = generated_responses
__lowercase = text
def __eq__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = False ) -> Optional[int]:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
f"with: \"{text}\"." )
__lowercase = text
else:
logger.warning(
f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase = text
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
self.generated_responses.append(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self ) -> int:
'''simple docstring'''
__lowercase = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase = '''user''' if is_user else '''bot'''
output += f"{name} >> {text} \n"
return output
@add_end_docstrings(
lowerCAmelCase__ , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__lowercase = self.tokenizer.eos_token
def _UpperCAmelCase (self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = {}
__lowercase = {}
__lowercase = {}
if min_length_for_response is not None:
__lowercase = min_length_for_response
if minimum_tokens is not None:
__lowercase = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=0 ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = super().__call__(_lowerCamelCase ,num_workers=_lowerCamelCase ,**_lowerCamelCase )
if isinstance(_lowerCamelCase ,_lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=32 ) -> Dict[str, Any]:
'''simple docstring'''
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
if conversation.new_user_input is None:
raise ValueError(
f"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer ,'''_build_conversation_input_ids''' ):
__lowercase = self.tokenizer._build_conversation_input_ids(_lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase = self._legacy_parse_and_tokenize(_lowerCamelCase )
if self.framework == "pt":
__lowercase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=10 ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = generate_kwargs.get('''max_length''' ,self.model.config.max_length )
__lowercase = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase = max_length - minimum_tokens
__lowercase = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase = model_inputs['''attention_mask'''][:, -trim:]
__lowercase = model_inputs.pop('''conversation''' )
__lowercase = max_length
__lowercase = self.model.generate(**_lowerCamelCase ,**_lowerCamelCase )
if self.model.config.is_encoder_decoder:
__lowercase = 1
else:
__lowercase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=True ) -> Dict:
'''simple docstring'''
__lowercase = model_outputs['''output_ids''']
__lowercase = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase ,)
__lowercase = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_lowerCamelCase )
return conversation
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer.eos_token_id
__lowercase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) )
if len(_lowerCamelCase ) > self.tokenizer.model_max_length:
__lowercase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 711
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 712
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 0
|
'''simple docstring'''
from PIL import Image
def _lowerCAmelCase ( lowerCamelCase_ : Image ):
__lowercase , __lowercase = image.size
__lowercase = 0
__lowercase = image.load()
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
__lowercase = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowerCamelCase_ ):
for i in range(lowerCamelCase_ ):
__lowercase = 2_5_5 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
| 713
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE = '''MobileNetV1Config'''
# Base docstring
_SCREAMING_SNAKE_CASE = '''google/mobilenet_v1_1.0_224'''
_SCREAMING_SNAKE_CASE = [1, 1_0_2_4, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE = '''google/mobilenet_v1_1.0_224'''
_SCREAMING_SNAKE_CASE = '''tabby, tabby cat'''
_SCREAMING_SNAKE_CASE = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=None ):
__lowercase = {}
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__lowercase = model.mobilenet_va
else:
__lowercase = model
__lowercase = '''MobilenetV1/Conv2d_0/'''
__lowercase = backbone.conv_stem.convolution.weight
__lowercase = backbone.conv_stem.normalization.bias
__lowercase = backbone.conv_stem.normalization.weight
__lowercase = backbone.conv_stem.normalization.running_mean
__lowercase = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
__lowercase = i + 1
__lowercase = i * 2
__lowercase = backbone.layer[pt_index]
__lowercase = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
__lowercase = pointer.convolution.weight
__lowercase = pointer.normalization.bias
__lowercase = pointer.normalization.weight
__lowercase = pointer.normalization.running_mean
__lowercase = pointer.normalization.running_var
__lowercase = backbone.layer[pt_index + 1]
__lowercase = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
__lowercase = pointer.convolution.weight
__lowercase = pointer.normalization.bias
__lowercase = pointer.normalization.weight
__lowercase = pointer.normalization.running_mean
__lowercase = pointer.normalization.running_var
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__lowercase = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
__lowercase = model.classifier.weight
__lowercase = model.classifier.bias
return tf_to_pt_map
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
__lowercase = tf.train.list_variables(lowerCamelCase_ )
__lowercase = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
__lowercase = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = array
# Build TF to PyTorch weights loading map
__lowercase = _build_tf_to_pytorch_map(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
__lowercase = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
__lowercase = np.transpose(lowerCamelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
__lowercase = array.squeeze().transpose()
else:
__lowercase = np.transpose(lowerCamelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
__lowercase = torch.from_numpy(lowerCamelCase_ )
tf_weights.pop(lowerCamelCase_ , lowerCamelCase_ )
tf_weights.pop(name + '''/RMSProp''' , lowerCamelCase_ )
tf_weights.pop(name + '''/RMSProp_1''' , lowerCamelCase_ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , lowerCamelCase_ )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def _lowerCAmelCase ( lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : nn.Convad ):
__lowercase , __lowercase = features.shape[-2:]
__lowercase , __lowercase = conv_layer.stride
__lowercase , __lowercase = conv_layer.kernel_size
if in_height % stride_height == 0:
__lowercase = max(kernel_height - stride_height , 0 )
else:
__lowercase = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__lowercase = max(kernel_width - stride_width , 0 )
else:
__lowercase = max(kernel_width - (in_width % stride_width) , 0 )
__lowercase = pad_along_width // 2
__lowercase = pad_along_width - pad_left
__lowercase = pad_along_height // 2
__lowercase = pad_along_height - pad_top
__lowercase = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCamelCase_ , lowerCamelCase_ , '''constant''' , 0.0 )
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = 1 ,_lowerCamelCase = 1 ,_lowerCamelCase = False ,_lowerCamelCase = True ,_lowerCamelCase = True ,) -> None:
'''simple docstring'''
super().__init__()
__lowercase = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
__lowercase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__lowercase = nn.Convad(
in_channels=_lowerCamelCase ,out_channels=_lowerCamelCase ,kernel_size=_lowerCamelCase ,stride=_lowerCamelCase ,padding=_lowerCamelCase ,groups=_lowerCamelCase ,bias=_lowerCamelCase ,padding_mode='''zeros''' ,)
if use_normalization:
__lowercase = nn.BatchNormad(
num_features=_lowerCamelCase ,eps=config.layer_norm_eps ,momentum=0.9_9_9_7 ,affine=_lowerCamelCase ,track_running_stats=_lowerCamelCase ,)
else:
__lowercase = None
if use_activation:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = ACTaFN[use_activation]
elif isinstance(config.hidden_act ,_lowerCamelCase ):
__lowercase = ACTaFN[config.hidden_act]
else:
__lowercase = config.hidden_act
else:
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ) -> torch.Tensor:
'''simple docstring'''
if self.config.tf_padding:
__lowercase = apply_tf_padding(_lowerCamelCase ,self.convolution )
__lowercase = self.convolution(_lowerCamelCase )
if self.normalization is not None:
__lowercase = self.normalization(_lowerCamelCase )
if self.activation is not None:
__lowercase = self.activation(_lowerCamelCase )
return features
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = MobileNetVaConfig
a : Union[str, Any] = load_tf_weights_in_mobilenet_va
a : Optional[Any] = "mobilenet_v1"
a : int = "pixel_values"
a : Union[str, Any] = False
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if isinstance(_lowerCamelCase ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCamelCase ,nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_SCREAMING_SNAKE_CASE = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_SCREAMING_SNAKE_CASE = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase__ , )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase = True ) -> str:
'''simple docstring'''
super().__init__(_lowerCamelCase )
__lowercase = config
__lowercase = 32
__lowercase = max(int(depth * config.depth_multiplier ) ,config.min_depth )
__lowercase = MobileNetVaConvLayer(
_lowerCamelCase ,in_channels=config.num_channels ,out_channels=_lowerCamelCase ,kernel_size=3 ,stride=2 ,)
__lowercase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__lowercase = nn.ModuleList()
for i in range(13 ):
__lowercase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__lowercase = max(int(depth * config.depth_multiplier ) ,config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase ,in_channels=_lowerCamelCase ,out_channels=_lowerCamelCase ,kernel_size=3 ,stride=strides[i] ,groups=_lowerCamelCase ,) )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase ,in_channels=_lowerCamelCase ,out_channels=_lowerCamelCase ,kernel_size=1 ,) )
__lowercase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _UpperCAmelCase (self ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
'''simple docstring'''
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
__lowercase = self.conv_stem(_lowerCamelCase )
__lowercase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__lowercase = layer_module(_lowerCamelCase )
if output_hidden_states:
__lowercase = all_hidden_states + (hidden_states,)
__lowercase = hidden_states
if self.pooler is not None:
__lowercase = torch.flatten(self.pooler(_lowerCamelCase ) ,start_dim=1 )
else:
__lowercase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase ,pooler_output=_lowerCamelCase ,hidden_states=_lowerCamelCase ,)
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
super().__init__(_lowerCamelCase )
__lowercase = config.num_labels
__lowercase = MobileNetVaModel(_lowerCamelCase )
__lowercase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__lowercase = nn.Dropout(config.classifier_dropout_prob ,inplace=_lowerCamelCase )
__lowercase = nn.Linear(_lowerCamelCase ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _UpperCAmelCase (self ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.mobilenet_va(_lowerCamelCase ,output_hidden_states=_lowerCamelCase ,return_dict=_lowerCamelCase )
__lowercase = outputs.pooler_output if return_dict else outputs[1]
__lowercase = self.classifier(self.dropout(_lowerCamelCase ) )
__lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase = '''single_label_classification'''
else:
__lowercase = '''multi_label_classification'''
if self.config.problem_type == "regression":
__lowercase = MSELoss()
if self.num_labels == 1:
__lowercase = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
__lowercase = loss_fct(_lowerCamelCase ,_lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase = BCEWithLogitsLoss()
__lowercase = loss_fct(_lowerCamelCase ,_lowerCamelCase )
if not return_dict:
__lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_lowerCamelCase ,logits=_lowerCamelCase ,hidden_states=outputs.hidden_states ,)
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
_SCREAMING_SNAKE_CASE = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_SCREAMING_SNAKE_CASE = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 715
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 0
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_SCREAMING_SNAKE_CASE : str = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __lowercase :
'''simple docstring'''
def __init__(self ) -> List[str]:
'''simple docstring'''
__lowercase = WATERMARK_BITS
__lowercase = WatermarkEncoder()
self.encoder.set_watermark('''bits''' ,self.watermark )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
if images.shape[-1] < 256:
return images
__lowercase = (255 * (images / 2 + 0.5)).cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
__lowercase = [self.encoder.encode(_lowerCamelCase ,'''dwtDct''' ) for image in images]
__lowercase = torch.from_numpy(np.array(_lowerCamelCase ) ).permute(0 ,3 ,1 ,2 )
__lowercase = torch.clamp(2 * (images / 255 - 0.5) ,min=-1.0 ,max=1.0 )
return images
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
__lowercase = model.generate(_lowerCamelCase ,max_new_tokens=10 ,do_sample=_lowerCamelCase )
__lowercase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__lowercase = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase ,max_new_tokens=10 ,do_sample=_lowerCamelCase ,streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__lowercase = cs.out[:-1]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
__lowercase = model.generate(_lowerCamelCase ,max_new_tokens=10 ,do_sample=_lowerCamelCase )
__lowercase = tokenizer.decode(greedy_ids[0] )
__lowercase = TextIteratorStreamer(_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__lowercase = Thread(target=model.generate ,kwargs=_lowerCamelCase )
thread.start()
__lowercase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
__lowercase = model.generate(_lowerCamelCase ,max_new_tokens=10 ,do_sample=_lowerCamelCase )
__lowercase = greedy_ids[:, input_ids.shape[1] :]
__lowercase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__lowercase = TextStreamer(_lowerCamelCase ,skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase ,max_new_tokens=10 ,do_sample=_lowerCamelCase ,streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__lowercase = cs.out[:-1]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
__lowercase = -1
__lowercase = torch.ones((1, 5) ,device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__lowercase = TextStreamer(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase ,max_new_tokens=1 ,do_sample=_lowerCamelCase ,streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__lowercase = cs.out[:-1] # Remove the final "\n"
__lowercase = tokenizer(_lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
__lowercase = TextIteratorStreamer(_lowerCamelCase ,timeout=0.0_0_1 )
__lowercase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__lowercase = Thread(target=model.generate ,kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
__lowercase = ''''''
for new_text in streamer:
streamer_text += new_text
| 717
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = '''Normal'''
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = '''Abnormality detected'''
| 56
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_SCREAMING_SNAKE_CASE = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
__lowercase = [image]
__lowercase = [trans(img.convert('''RGB''' ) ) for img in image]
__lowercase = torch.stack(lowerCamelCase_ )
return image
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__lowercase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = min(int(num_inference_steps * strength ) ,_lowerCamelCase )
__lowercase = max(num_inference_steps - init_timestep ,0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowerCamelCase )}" )
__lowercase = image.to(device=_lowerCamelCase ,dtype=_lowerCamelCase )
if isinstance(_lowerCamelCase ,_lowerCamelCase ) and len(_lowerCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
__lowercase = init_latents.shape
__lowercase = randn_tensor(_lowerCamelCase ,generator=_lowerCamelCase ,device=_lowerCamelCase ,dtype=_lowerCamelCase )
# get latents
print('''add noise to latents at timestep''' ,_lowerCamelCase )
__lowercase = self.scheduler.add_noise(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
__lowercase = init_latents
return latents
@torch.no_grad()
def __call__(self ,_lowerCamelCase = None ,_lowerCamelCase = 0.8 ,_lowerCamelCase = 1 ,_lowerCamelCase = None ,_lowerCamelCase = 0.0 ,_lowerCamelCase = 50 ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(_lowerCamelCase )
# 2. Preprocess image
__lowercase = preprocess(_lowerCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(_lowerCamelCase ,device=self.device )
__lowercase , __lowercase = self.get_timesteps(_lowerCamelCase ,_lowerCamelCase ,self.device )
__lowercase = timesteps[:1].repeat(_lowerCamelCase )
# 4. Prepare latent variables
__lowercase = self.prepare_latents(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,self.unet.dtype ,self.device ,_lowerCamelCase )
__lowercase = latents
# 5. Denoising loop
for t in self.progress_bar(_lowerCamelCase ):
# 1. predict noise model_output
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__lowercase = self.scheduler.step(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,eta=_lowerCamelCase ,use_clipped_model_output=_lowerCamelCase ,generator=_lowerCamelCase ,).prev_sample
__lowercase = (image / 2 + 0.5).clamp(0 ,1 )
__lowercase = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_lowerCamelCase )
| 718
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
a : str = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : Dict = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = TextaTextGenerationPipeline(model=_lowerCamelCase ,tokenizer=_lowerCamelCase )
return generator, ["Something to write", "Something else"]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = generator('''Something there''' )
self.assertEqual(_lowerCamelCase ,[{'''generated_text''': ANY(_lowerCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
__lowercase = generator(['''This is great !''', '''Something else'''] ,num_return_sequences=2 ,do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
] ,)
__lowercase = generator(
['''This is great !''', '''Something else'''] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
] ,)
with self.assertRaises(_lowerCamelCase ):
generator(4 )
@require_torch
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = pipeline('''text2text-generation''' ,model='''patrickvonplaten/t5-tiny-random''' ,framework='''pt''' )
# do_sample=False necessary for reproducibility
__lowercase = generator('''Something there''' ,do_sample=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,[{'''generated_text''': ''''''}] )
__lowercase = 3
__lowercase = generator(
'''Something there''' ,num_return_sequences=_lowerCamelCase ,num_beams=_lowerCamelCase ,)
__lowercase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = generator('''This is a test''' ,do_sample=_lowerCamelCase ,num_return_sequences=2 ,return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] ,)
__lowercase = generator.model.config.eos_token_id
__lowercase = '''<pad>'''
__lowercase = generator(
['''This is a test''', '''This is a second test'''] ,do_sample=_lowerCamelCase ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_lowerCamelCase ,)
self.assertEqual(
_lowerCamelCase ,[
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] ,)
@require_tf
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = pipeline('''text2text-generation''' ,model='''patrickvonplaten/t5-tiny-random''' ,framework='''tf''' )
# do_sample=False necessary for reproducibility
__lowercase = generator('''Something there''' ,do_sample=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,[{'''generated_text''': ''''''}] )
| 719
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 56
| 0
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 720
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=7 ,_lowerCamelCase=3 ,_lowerCamelCase=18 ,_lowerCamelCase=30 ,_lowerCamelCase=400 ,_lowerCamelCase=True ,_lowerCamelCase=None ,_lowerCamelCase=True ,_lowerCamelCase=False ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=[0.5, 0.5, 0.5] ,_lowerCamelCase=[0.5, 0.5, 0.5] ,) -> List[Any]:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size if size is not None else {'''height''': 18, '''width''': 20}
__lowercase = do_thumbnail
__lowercase = do_align_axis
__lowercase = do_pad
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : List[Any] = DonutImageProcessor if is_vision_available() else None
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = DonutImageProcessingTester(self )
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase ,'''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''size''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''do_thumbnail''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''do_align_long_axis''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''do_pad''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase ,'''image_std''' ) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 20} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) )
self.assertEqual(image_processor.size ,{'''height''': 84, '''width''': 42} )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
@is_flaky()
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase ,Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(_lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCamelCase ,numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase ,np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(_lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCamelCase ,torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(_lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
| 721
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {}
class _snake_case ( _A ):
_A = 'llama'
_A = ['past_key_values']
def __init__( self ,UpperCamelCase=32_000 ,UpperCamelCase=4_096 ,UpperCamelCase=11_008 ,UpperCamelCase=32 ,UpperCamelCase=32 ,UpperCamelCase=None ,UpperCamelCase="silu" ,UpperCamelCase=2_048 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-6 ,UpperCamelCase=True ,UpperCamelCase=0 ,UpperCamelCase=1 ,UpperCamelCase=2 ,UpperCamelCase=1 ,UpperCamelCase=False ,UpperCamelCase=None ,**UpperCamelCase ,) -> Tuple:
snake_case__ :int = vocab_size
snake_case__ :Any = max_position_embeddings
snake_case__ :int = hidden_size
snake_case__ :List[Any] = intermediate_size
snake_case__ :int = num_hidden_layers
snake_case__ :Union[str, Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
snake_case__ :Dict = num_attention_heads
snake_case__ :List[str] = num_key_value_heads
snake_case__ :Optional[int] = hidden_act
snake_case__ :Any = initializer_range
snake_case__ :Dict = rms_norm_eps
snake_case__ :List[str] = pretraining_tp
snake_case__ :Any = use_cache
snake_case__ :Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCamelCase ,bos_token_id=UpperCamelCase ,eos_token_id=UpperCamelCase ,tie_word_embeddings=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ) -> Optional[int]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,UpperCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'got {self.rope_scaling}' )
snake_case__ :Optional[int] = self.rope_scaling.get("type" ,UpperCamelCase )
snake_case__ :Optional[Any] = self.rope_scaling.get("factor" ,UpperCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(UpperCamelCase ,UpperCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 57
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = b.T
snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 )
snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 )
snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = x.reshape(-1 , 3 )
snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case )
return np.argmin(__snake_case , axis=1 )
class _snake_case ( _A ):
_A = ['pixel_values']
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
| 57
| 1
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase : Tuple = 2_5_6
class _snake_case ( _A ):
_A = ['melgan']
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> None:
super().__init__()
# From MELGAN
snake_case__ :int = math.log(1E-5 ) # Matches MelGAN training.
snake_case__ :List[Any] = 4.0 # Largest value for most examples
snake_case__ :List[str] = 128
self.register_modules(
notes_encoder=UpperCamelCase ,continuous_encoder=UpperCamelCase ,decoder=UpperCamelCase ,scheduler=UpperCamelCase ,melgan=UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=(-1.0, 1.0) ,UpperCamelCase=False ) -> Any:
snake_case__ , snake_case__ :Optional[int] = output_range
if clip:
snake_case__ :List[str] = torch.clip(UpperCamelCase ,self.min_value ,self.max_value )
# Scale to [0, 1].
snake_case__ :Tuple = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=(-1.0, 1.0) ,UpperCamelCase=False ) -> Any:
snake_case__ , snake_case__ :Union[str, Any] = input_range
snake_case__ :Any = torch.clip(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) if clip else outputs
# Scale to [0, 1].
snake_case__ :Optional[Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Optional[int] = input_tokens > 0
snake_case__ , snake_case__ :List[Any] = self.notes_encoder(
encoder_input_tokens=UpperCamelCase ,encoder_inputs_mask=UpperCamelCase )
snake_case__ , snake_case__ :List[Any] = self.continuous_encoder(
encoder_inputs=UpperCamelCase ,encoder_inputs_mask=UpperCamelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
snake_case__ :Tuple = noise_time
if not torch.is_tensor(UpperCamelCase ):
snake_case__ :Tuple = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ :str = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ :int = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device )
snake_case__ :str = self.decoder(
encodings_and_masks=UpperCamelCase ,decoder_input_tokens=UpperCamelCase ,decoder_noise_time=UpperCamelCase )
return logits
@torch.no_grad()
def __call__( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = 100 ,UpperCamelCase = True ,UpperCamelCase = "numpy" ,UpperCamelCase = None ,UpperCamelCase = 1 ,) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase ,UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(UpperCamelCase )}.' )
snake_case__ :List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa )
snake_case__ :Any = np.zeros([1, 0, self.n_dims] ,np.floataa )
snake_case__ :List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=UpperCamelCase ,device=self.device )
for i, encoder_input_tokens in enumerate(UpperCamelCase ):
if i == 0:
snake_case__ :int = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device ,dtype=self.decoder.dtype )
# The first chunk has no previous context.
snake_case__ :List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=UpperCamelCase ,device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
snake_case__ :List[Any] = ones
snake_case__ :List[Any] = self.scale_features(
UpperCamelCase ,output_range=[-1.0, 1.0] ,clip=UpperCamelCase )
snake_case__ :List[Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=UpperCamelCase ,continuous_mask=UpperCamelCase ,)
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
snake_case__ :List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape ,generator=UpperCamelCase ,device=self.device ,dtype=self.decoder.dtype ,)
# set step values
self.scheduler.set_timesteps(UpperCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
snake_case__ :List[str] = self.decode(
encodings_and_masks=UpperCamelCase ,input_tokens=UpperCamelCase ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
# Compute previous output: x_t -> x_t-1
snake_case__ :str = self.scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,generator=UpperCamelCase ).prev_sample
snake_case__ :Any = self.scale_to_features(UpperCamelCase ,input_range=[-1.0, 1.0] )
snake_case__ :Union[str, Any] = mel[:1]
snake_case__ :Tuple = mel.cpu().float().numpy()
snake_case__ :List[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase ,UpperCamelCase )
logger.info("Generated segment" ,UpperCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
snake_case__ :str = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
snake_case__ :Any = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCamelCase )
| 57
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict:
'''simple docstring'''
snake_case__ :Optional[Any] = dataset_loading_script_name
snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=__snake_case )
snake_case__ :List[Any] = script_dir / F'{script_name}.py'
with open(__snake_case , "w" ) as f:
f.write(__snake_case )
return str(__snake_case )
| 57
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( _A ):
def __init__( self ,UpperCamelCase ) -> List[Any]:
super().__init__()
snake_case__ :Union[str, Any] = nn.ModuleList(UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = False ,UpperCamelCase = True ,) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(UpperCamelCase ,UpperCamelCase ,self.nets ) ):
snake_case__ , snake_case__ :List[str] = controlnet(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,)
# merge samples
if i == 0:
snake_case__ , snake_case__ :Optional[int] = down_samples, mid_sample
else:
snake_case__ :Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(UpperCamelCase ,UpperCamelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = False ,UpperCamelCase = None ,) -> Union[str, Any]:
snake_case__ :List[str] = 0
snake_case__ :Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
UpperCamelCase ,is_main_process=UpperCamelCase ,save_function=UpperCamelCase ,safe_serialization=UpperCamelCase ,variant=UpperCamelCase ,)
idx += 1
snake_case__ :List[Any] = model_path_to_save + f'_{idx}'
@classmethod
def lowerCAmelCase_ ( cls ,UpperCamelCase ,**UpperCamelCase ) -> Dict:
snake_case__ :List[Any] = 0
snake_case__ :int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case__ :Any = pretrained_model_path
while os.path.isdir(UpperCamelCase ):
snake_case__ :List[str] = ControlNetModel.from_pretrained(UpperCamelCase ,**UpperCamelCase )
controlnets.append(UpperCamelCase )
idx += 1
snake_case__ :Optional[Any] = pretrained_model_path + f'_{idx}'
logger.info(f'{len(UpperCamelCase )} controlnets loaded from {pretrained_model_path}.' )
if len(UpperCamelCase ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(UpperCamelCase )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(UpperCamelCase )
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
from collections import defaultdict
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :Dict = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
snake_case__ :str = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase ) )
]
snake_case__ :Any = defaultdict(UpperCamelCase ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
snake_case__ :Dict = (1 << len(UpperCamelCase )) - 1
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Dict:
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
snake_case__ :Any = self.count_ways_until(UpperCamelCase ,task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) ,task_no + 1 )
# save the value.
snake_case__ :Tuple = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]:
# Store the list of persons for each task
for i in range(len(UpperCamelCase ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 ,1 )
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__UpperCAmelCase : Optional[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 57
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
snake_case__ :Dict = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase )
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = testing
snake_case__ :Union[str, Any] = testing_file
snake_case__ :List[str] = path
def lowerCAmelCase_ ( self ) -> List[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
snake_case__ :str = (
Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase ) )
else:
with open(self._testing_file ,"r" ) as configuration_file:
snake_case__ :str = json.load(UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase ):
with open(UpperCamelCase ,"r" ) as f:
snake_case__ :List[str] = f.readlines()
with open(UpperCamelCase ,"w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
# Create temp file
snake_case__ , snake_case__ :Optional[Any] = mkstemp()
snake_case__ :Optional[Any] = False
with fdopen(UpperCamelCase ,"w" ) as new_file:
with open(UpperCamelCase ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase )
if line_to_copy_below in line:
snake_case__ :Optional[Any] = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase ,UpperCamelCase )
# Remove original file
remove(UpperCamelCase )
# Move new file
move(UpperCamelCase ,UpperCamelCase )
def skip_units(UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase ):
with open(UpperCamelCase ) as datafile:
snake_case__ :int = []
snake_case__ :Optional[int] = False
snake_case__ :List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :Tuple = skip_units(UpperCamelCase )
elif "# Below: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :List[str] = skip_units(UpperCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = []
elif "# Replace with" in line and "##" not in line:
snake_case__ :Optional[Any] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase )
remove(UpperCamelCase )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(UpperCamelCase )
| 57
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def lowercase_ ( __snake_case : Tuple , __snake_case : Tuple=False ) -> str:
'''simple docstring'''
snake_case__ :Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ :Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def lowercase_ ( __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Optional[Any]=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ :List[str] = ""
else:
snake_case__ :List[Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ :Dict = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
snake_case__ :Dict = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ :Any = in_proj_weight[
: config.hidden_size, :
]
snake_case__ :List[str] = in_proj_bias[: config.hidden_size]
snake_case__ :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ :List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ :Union[str, Any] = in_proj_bias[-config.hidden_size :]
def lowercase_ ( __snake_case : Dict , __snake_case : int , __snake_case : Tuple ) -> Tuple:
'''simple docstring'''
snake_case__ :Optional[int] = dct.pop(__snake_case )
snake_case__ :Dict = val
def lowercase_ ( ) -> int:
'''simple docstring'''
snake_case__ :Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case__ :Optional[int] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def lowercase_ ( __snake_case : Tuple , __snake_case : List[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = DeiTConfig()
# all deit models have fine-tuned heads
snake_case__ :Tuple = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ :Optional[int] = 10_00
snake_case__ :Tuple = "huggingface/label-files"
snake_case__ :Union[str, Any] = "imagenet-1k-id2label.json"
snake_case__ :Dict = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
snake_case__ :int = {int(__snake_case ): v for k, v in idalabel.items()}
snake_case__ :List[str] = idalabel
snake_case__ :Union[str, Any] = {v: k for k, v in idalabel.items()}
snake_case__ :int = int(deit_name[-6:-4] )
snake_case__ :str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
snake_case__ :Union[str, Any] = 1_92
snake_case__ :Optional[Any] = 7_68
snake_case__ :Tuple = 12
snake_case__ :int = 3
elif deit_name[9:].startswith("small" ):
snake_case__ :Any = 3_84
snake_case__ :Union[str, Any] = 15_36
snake_case__ :List[Any] = 12
snake_case__ :Optional[Any] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
snake_case__ :Tuple = 10_24
snake_case__ :int = 40_96
snake_case__ :List[Any] = 24
snake_case__ :Any = 16
# load original model from timm
snake_case__ :Optional[Any] = timm.create_model(__snake_case , pretrained=__snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ :str = timm_model.state_dict()
snake_case__ :Any = create_rename_keys(__snake_case , __snake_case )
for src, dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
read_in_q_k_v(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
snake_case__ :List[Any] = DeiTForImageClassificationWithTeacher(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case__ :List[str] = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case__ :str = DeiTImageProcessor(size=__snake_case , crop_size=config.image_size )
snake_case__ :List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
snake_case__ :str = encoding["pixel_values"]
snake_case__ :List[str] = model(__snake_case )
snake_case__ :Optional[Any] = timm_model(__snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1e-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 57
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = HerbertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict:
super().__init__(
UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Optional[int] = [self.cls_token_id]
snake_case__ :Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
| 57
| 1
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
snake_case__ :Dict = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase )
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = testing
snake_case__ :Union[str, Any] = testing_file
snake_case__ :List[str] = path
def lowerCAmelCase_ ( self ) -> List[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
snake_case__ :str = (
Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase ) )
else:
with open(self._testing_file ,"r" ) as configuration_file:
snake_case__ :str = json.load(UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase ):
with open(UpperCamelCase ,"r" ) as f:
snake_case__ :List[str] = f.readlines()
with open(UpperCamelCase ,"w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
# Create temp file
snake_case__ , snake_case__ :Optional[Any] = mkstemp()
snake_case__ :Optional[Any] = False
with fdopen(UpperCamelCase ,"w" ) as new_file:
with open(UpperCamelCase ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase )
if line_to_copy_below in line:
snake_case__ :Optional[Any] = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase ,UpperCamelCase )
# Remove original file
remove(UpperCamelCase )
# Move new file
move(UpperCamelCase ,UpperCamelCase )
def skip_units(UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase ):
with open(UpperCamelCase ) as datafile:
snake_case__ :int = []
snake_case__ :Optional[int] = False
snake_case__ :List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :Tuple = skip_units(UpperCamelCase )
elif "# Below: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :List[str] = skip_units(UpperCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = []
elif "# Replace with" in line and "##" not in line:
snake_case__ :Optional[Any] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase )
remove(UpperCamelCase )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(UpperCamelCase )
| 57
|
def lowercase_ ( __snake_case : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case__ :List[str] = 4
snake_case__ :Optional[int] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case__ :List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 57
| 1
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__UpperCAmelCase : Dict = re.compile(R"\s+")
def lowercase_ ( __snake_case : int ) -> Dict:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(__snake_case , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def lowercase_ ( __snake_case : Any ) -> List[Any]:
'''simple docstring'''
snake_case__ :Union[str, Any] = [len(__snake_case ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__snake_case ), "line_max": max(__snake_case )}
def lowercase_ ( __snake_case : Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case__ :Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def lowercase_ ( __snake_case : List[str] , __snake_case : int ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any]=5 ) -> str:
'''simple docstring'''
snake_case__ :Dict = ["auto-generated", "autogenerated", "automatically generated"]
snake_case__ :List[Any] = example["content"].splitlines()
for _, line in zip(range(__snake_case ) , __snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowercase_ ( __snake_case : List[str] , __snake_case : int=5 , __snake_case : List[str]=0.0_5 ) -> Optional[int]:
'''simple docstring'''
snake_case__ :int = ["unit tests", "test file", "configuration file"]
snake_case__ :Any = example["content"].splitlines()
snake_case__ :List[Any] = 0
snake_case__ :Union[str, Any] = 0
# first test
for _, line in zip(range(__snake_case ) , __snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case__ :Dict = example["content"].count("\n" )
snake_case__ :Optional[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowercase_ ( __snake_case : int ) -> Tuple:
'''simple docstring'''
snake_case__ :str = ["def ", "class ", "for ", "while "]
snake_case__ :int = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowercase_ ( __snake_case : Tuple , __snake_case : Tuple=4 ) -> List[Any]:
'''simple docstring'''
snake_case__ :Tuple = example["content"].splitlines()
snake_case__ :Optional[int] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowercase_ ( __snake_case : str ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[str] = tokenizer(example["content"] , truncation=__snake_case )["input_ids"]
snake_case__ :int = len(example["content"] ) / len(__snake_case )
return {"ratio": ratio}
def lowercase_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :str = {}
results.update(get_hash(__snake_case ) )
results.update(line_stats(__snake_case ) )
results.update(alpha_stats(__snake_case ) )
results.update(char_token_ratio(__snake_case ) )
results.update(is_autogenerated(__snake_case ) )
results.update(is_config_or_test(__snake_case ) )
results.update(has_no_keywords(__snake_case ) )
results.update(has_few_assignments(__snake_case ) )
return results
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ) -> Any:
'''simple docstring'''
if not check_uniques(__snake_case , __snake_case ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowercase_ ( __snake_case : Dict ) -> Any:
'''simple docstring'''
with open(__snake_case , "rb" ) as f_in:
with gzip.open(str(__snake_case ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(__snake_case , __snake_case )
os.unlink(__snake_case )
# Settings
__UpperCAmelCase : Any = HfArgumentParser(PreprocessingArguments)
__UpperCAmelCase : Optional[int] = parser.parse_args()
if args.num_workers is None:
__UpperCAmelCase : Any = multiprocessing.cpu_count()
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__UpperCAmelCase : int = time.time()
__UpperCAmelCase : Dict = load_dataset(args.dataset_name, split="train")
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__UpperCAmelCase : Optional[Any] = time.time()
__UpperCAmelCase : Tuple = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__UpperCAmelCase : Dict = set(ds.unique("hash"))
__UpperCAmelCase : List[Any] = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__UpperCAmelCase : List[str] = time.time()
__UpperCAmelCase : Tuple = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__UpperCAmelCase : Tuple = time.time()
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__UpperCAmelCase : Optional[int] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
__UpperCAmelCase : Any = output_dir / "data"
data_dir.mkdir(exist_ok=True)
__UpperCAmelCase : Optional[Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__UpperCAmelCase : Dict = str(data_dir / F'''file-{file_number+1:012}.json''')
__UpperCAmelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 57
|
from typing import Any
def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list:
'''simple docstring'''
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
snake_case__ :dict = {}
snake_case__ :dict = {}
for state in states_space:
snake_case__ :List[Any] = observations_space[0]
snake_case__ :str = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case__ :str = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
snake_case__ :Any = observations_space[o]
snake_case__ :Tuple = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case__ :Tuple = ""
snake_case__ :Union[str, Any] = -1
for k_state in states_space:
snake_case__ :int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case__ :str = probability
snake_case__ :Tuple = k_state
# Update probabilities and pointers dicts
snake_case__ :List[str] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case__ :List[str] = arg_max
# The final observation
snake_case__ :str = observations_space[len(__snake_case ) - 1]
# argmax for given final observation
snake_case__ :Optional[int] = ""
snake_case__ :List[str] = -1
for k_state in states_space:
snake_case__ :List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case__ :List[str] = probability
snake_case__ :int = k_state
snake_case__ :Any = arg_max
# Process pointers backwards
snake_case__ :int = last_state
snake_case__ :List[str] = []
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
snake_case__ :List[str] = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None:
'''simple docstring'''
_validate_list(__snake_case , "observations_space" )
_validate_list(__snake_case , "states_space" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :Optional[int] = F'{var_name} must be a list'
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
snake_case__ :Any = F'{var_name} must be a list of strings'
raise ValueError(__snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_dict(__snake_case , "initial_probabilities" , __snake_case )
_validate_nested_dict(__snake_case , "transition_probabilities" )
_validate_nested_dict(__snake_case , "emission_probabilities" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :str = F'{var_name} must be a dict'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
snake_case__ :List[Any] = F'{var_name} all keys must be strings'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
snake_case__ :Optional[int] = "nested dictionary " if nested else ""
snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 57
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Optional[Any] = "ylacombe/bark-small"
snake_case__ :Dict = tempfile.mkdtemp()
snake_case__ :Tuple = "en_speaker_1"
snake_case__ :List[Any] = "This is a test string"
snake_case__ :List[Any] = "speaker_embeddings_path.json"
snake_case__ :Optional[Any] = "speaker_embeddings"
def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Union[str, Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[str] = self.get_tokenizer()
snake_case__ :Tuple = BarkProcessor(tokenizer=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
snake_case__ :Tuple = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
snake_case__ :Tuple = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
snake_case__ :int = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="(BOS)" ,eos_token="(EOS)" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
snake_case__ :Optional[Any] = 35
snake_case__ :Optional[int] = 2
snake_case__ :str = 8
snake_case__ :Any = {
"semantic_prompt": np.ones(UpperCamelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case__ :Any = processor(text=self.input_string ,voice_preset=UpperCamelCase )
snake_case__ :List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(UpperCamelCase ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case__ :List[str] = os.path.join(self.tmpdirname ,"file.npz" )
np.savez(UpperCamelCase ,**UpperCamelCase )
snake_case__ :Union[str, Any] = processor(text=self.input_string ,voice_preset=UpperCamelCase )
snake_case__ :int = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(UpperCamelCase ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case__ :Dict = processor(text=self.input_string ,voice_preset=self.voice_preset )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :int = self.get_tokenizer()
snake_case__ :str = BarkProcessor(tokenizer=UpperCamelCase )
snake_case__ :List[Any] = processor(text=self.input_string )
snake_case__ :Optional[int] = tokenizer(
self.input_string ,padding="max_length" ,max_length=256 ,add_special_tokens=UpperCamelCase ,return_attention_mask=UpperCamelCase ,return_token_type_ids=UpperCamelCase ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 57
|
def lowercase_ ( __snake_case : str ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__snake_case ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 57
| 1
|
def lowercase_ ( __snake_case : int , __snake_case : int ) -> int:
'''simple docstring'''
return number | (1 << position)
def lowercase_ ( __snake_case : int , __snake_case : int ) -> int:
'''simple docstring'''
return number & ~(1 << position)
def lowercase_ ( __snake_case : int , __snake_case : int ) -> int:
'''simple docstring'''
return number ^ (1 << position)
def lowercase_ ( __snake_case : int , __snake_case : int ) -> bool:
'''simple docstring'''
return ((number >> position) & 1) == 1
def lowercase_ ( __snake_case : int , __snake_case : int ) -> int:
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
def lowercase_ ( __snake_case : int = 10_00 ) -> int:
'''simple docstring'''
snake_case__ :int = 3
snake_case__ :int = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
| 1
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _snake_case ( unittest.TestCase ):
def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=30 ,UpperCamelCase=2 ,UpperCamelCase=3 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=32 ,UpperCamelCase=5 ,UpperCamelCase=4 ,UpperCamelCase=37 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=10 ,UpperCamelCase=0.02 ,) -> Dict:
snake_case__ :Any = parent
snake_case__ :Union[str, Any] = batch_size
snake_case__ :Tuple = image_size
snake_case__ :Any = patch_size
snake_case__ :Optional[int] = num_channels
snake_case__ :Dict = is_training
snake_case__ :str = use_labels
snake_case__ :List[Any] = hidden_size
snake_case__ :Dict = num_hidden_layers
snake_case__ :Tuple = num_attention_heads
snake_case__ :List[str] = intermediate_size
snake_case__ :List[Any] = hidden_act
snake_case__ :Optional[Any] = hidden_dropout_prob
snake_case__ :Tuple = attention_probs_dropout_prob
snake_case__ :Any = type_sequence_label_size
snake_case__ :int = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ :Union[str, Any] = (image_size // patch_size) ** 2
snake_case__ :Union[str, Any] = num_patches + 1
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ :Union[str, Any] = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :Union[str, Any] = FlaxViTModel(config=UpperCamelCase )
snake_case__ :Tuple = model(UpperCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ :List[str] = (self.image_size, self.image_size)
snake_case__ :Tuple = (self.patch_size, self.patch_size)
snake_case__ :Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> str:
snake_case__ :List[str] = self.type_sequence_label_size
snake_case__ :Optional[int] = FlaxViTForImageClassification(config=UpperCamelCase )
snake_case__ :List[str] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ :List[Any] = 1
snake_case__ :str = FlaxViTForImageClassification(UpperCamelCase )
snake_case__ :int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ :List[Any] = model(UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[str] = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) ,
) :List[str] = config_and_inputs
snake_case__ :List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _snake_case ( _A , unittest.TestCase ):
_A = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowerCAmelCase_ ( self ) -> None:
snake_case__ :Optional[int] = FlaxViTModelTester(self )
snake_case__ :Union[str, Any] = ConfigTester(self ,config_class=UpperCamelCase ,has_text_modality=UpperCamelCase ,hidden_size=37 )
def lowerCAmelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ , snake_case__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ :Any = model_class(UpperCamelCase )
snake_case__ :Optional[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ :Any = [*signature.parameters.keys()]
snake_case__ :List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ , snake_case__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ :int = self._prepare_for_class(UpperCamelCase ,UpperCamelCase )
snake_case__ :Union[str, Any] = model_class(UpperCamelCase )
@jax.jit
def model_jitted(UpperCamelCase ,**UpperCamelCase ):
return model(pixel_values=UpperCamelCase ,**UpperCamelCase )
with self.subTest("JIT Enabled" ):
snake_case__ :Any = model_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case__ :Optional[Any] = model_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase ,UpperCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def lowerCAmelCase_ ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
snake_case__ :List[str] = model_class_name.from_pretrained("google/vit-base-patch16-224" )
snake_case__ :Tuple = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCamelCase )
| 57
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCamelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
snake_case__ :str = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :int = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCamelCase )
self.assertIn("torch_and_transformers" ,UpperCamelCase )
self.assertIn("flax_and_transformers" ,UpperCamelCase )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 1
|
from __future__ import annotations
def lowercase_ ( __snake_case : list[int] ) -> int:
'''simple docstring'''
snake_case__ :Union[str, Any] = len(__snake_case ) // 2
# choose the middle 3 elements
snake_case__ :List[str] = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase : Union[str, Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase_ ( __snake_case : Dict , __snake_case : Dict , __snake_case : Union[str, Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Optional[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
snake_case__ :Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
snake_case__ :Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
snake_case__ :Union[str, Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ :str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ :str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=7 ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=99 ,UpperCamelCase=16 ,UpperCamelCase=2 ,UpperCamelCase=4 ,UpperCamelCase=4 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=32 ,UpperCamelCase=2 ,UpperCamelCase=1 ,UpperCamelCase=0 ,UpperCamelCase=0.02 ,) -> Optional[Any]:
snake_case__ :Dict = parent
snake_case__ :int = batch_size
snake_case__ :Optional[Any] = seq_length
snake_case__ :Optional[Any] = is_training
snake_case__ :Optional[Any] = use_labels
snake_case__ :Union[str, Any] = vocab_size
snake_case__ :Optional[int] = hidden_size
snake_case__ :List[Any] = num_hidden_layers
snake_case__ :List[Any] = num_attention_heads
snake_case__ :Any = intermediate_size
snake_case__ :Any = hidden_act
snake_case__ :Dict = hidden_dropout_prob
snake_case__ :str = attention_probs_dropout_prob
snake_case__ :str = max_position_embeddings
snake_case__ :Optional[int] = eos_token_id
snake_case__ :Optional[int] = pad_token_id
snake_case__ :Dict = bos_token_id
snake_case__ :List[str] = initializer_range
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) ,3 ,self.vocab_size )
snake_case__ :str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) ,dtype=np.intaa )) ,-1 )
snake_case__ :Union[str, Any] = shift_tokens_right(UpperCamelCase ,1 ,2 )
snake_case__ :List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,initializer_range=self.initializer_range ,use_cache=UpperCamelCase ,)
snake_case__ :Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self ) -> int:
snake_case__ , snake_case__ :Any = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Tuple:
snake_case__ :List[Any] = 20
snake_case__ :str = model_class_name(UpperCamelCase )
snake_case__ :Union[str, Any] = model.encode(inputs_dict["input_ids"] )
snake_case__ , snake_case__ :Any = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
snake_case__ :Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="i4" )
snake_case__ :Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
snake_case__ :Optional[Any] = model.decode(
decoder_input_ids[:, :-1] ,UpperCamelCase ,decoder_attention_mask=UpperCamelCase ,past_key_values=UpperCamelCase ,decoder_position_ids=UpperCamelCase ,)
snake_case__ :Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="i4" )
snake_case__ :Any = model.decode(
decoder_input_ids[:, -1:] ,UpperCamelCase ,decoder_attention_mask=UpperCamelCase ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=UpperCamelCase ,)
snake_case__ :str = model.decode(UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 ,msg=f'Max diff is {diff}' )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Tuple = 20
snake_case__ :Dict = model_class_name(UpperCamelCase )
snake_case__ :Optional[Any] = model.encode(inputs_dict["input_ids"] )
snake_case__ , snake_case__ :Tuple = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
snake_case__ :Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
snake_case__ :str = model.init_cache(decoder_input_ids.shape[0] ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
snake_case__ :Optional[int] = model.decode(
decoder_input_ids[:, :-1] ,UpperCamelCase ,decoder_attention_mask=UpperCamelCase ,past_key_values=UpperCamelCase ,decoder_position_ids=UpperCamelCase ,)
snake_case__ :int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="i4" )
snake_case__ :Optional[Any] = model.decode(
decoder_input_ids[:, -1:] ,UpperCamelCase ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=UpperCamelCase ,decoder_position_ids=UpperCamelCase ,)
snake_case__ :List[str] = model.decode(UpperCamelCase ,UpperCamelCase ,decoder_attention_mask=UpperCamelCase )
snake_case__ :Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 ,msg=f'Max diff is {diff}' )
@require_flax
class _snake_case ( unittest.TestCase ):
_A = 99
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] ,dtype=np.intaa ,)
snake_case__ :List[Any] = input_ids.shape[0]
snake_case__ :Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size ,d_model=24 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=32 ,decoder_ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ , snake_case__ , snake_case__ :List[str] = self._get_config_and_data()
snake_case__ :str = FlaxBlenderbotForConditionalGeneration(UpperCamelCase )
snake_case__ :Optional[Any] = lm_model(input_ids=UpperCamelCase )
snake_case__ :Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Tuple = BlenderbotConfig(
vocab_size=self.vocab_size ,d_model=14 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=8 ,decoder_ffn_dim=8 ,max_position_embeddings=48 ,)
snake_case__ :str = FlaxBlenderbotForConditionalGeneration(UpperCamelCase )
snake_case__ :Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] ,dtype=np.intaa )
snake_case__ :Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] ,dtype=np.intaa )
snake_case__ :Any = lm_model(input_ids=UpperCamelCase ,decoder_input_ids=UpperCamelCase )
snake_case__ :str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Union[str, Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] ,dtype=np.intaa )
snake_case__ :str = shift_tokens_right(UpperCamelCase ,1 ,2 )
snake_case__ :int = np.equal(UpperCamelCase ,1 ).astype(np.floataa ).sum()
snake_case__ :Dict = np.equal(UpperCamelCase ,1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape ,input_ids.shape )
self.assertEqual(UpperCamelCase ,n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] ,2 ).all() )
@require_flax
class _snake_case ( _A , unittest.TestCase , _A ):
_A = True
_A = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_A = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = FlaxBlenderbotModelTester(self )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ , snake_case__ :int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ , snake_case__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ :Optional[int] = self._prepare_for_class(UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = model_class(UpperCamelCase )
@jax.jit
def encode_jitted(UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ):
return model.encode(input_ids=UpperCamelCase ,attention_mask=UpperCamelCase )
with self.subTest("JIT Enabled" ):
snake_case__ :Dict = encode_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case__ :List[Any] = encode_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase ,UpperCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ , snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ :Union[str, Any] = model_class(UpperCamelCase )
snake_case__ :Optional[Any] = model.encode(inputs_dict["input_ids"] ,inputs_dict["attention_mask"] )
snake_case__ :List[str] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
return model.decode(
decoder_input_ids=UpperCamelCase ,decoder_attention_mask=UpperCamelCase ,encoder_outputs=UpperCamelCase ,)
with self.subTest("JIT Enabled" ):
snake_case__ :Tuple = decode_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case__ :int = decode_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase ,UpperCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
snake_case__ :Optional[int] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
snake_case__ :Any = np.ones((1, 1) ) * model.config.eos_token_id
snake_case__ :Dict = model(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@unittest.skipUnless(jax_device != "cpu" ,"3B test too slow on CPU." )
@slow
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Any = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
snake_case__ :Any = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
snake_case__ :str = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" ,from_pt=UpperCamelCase )
snake_case__ :List[str] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
snake_case__ :List[str] = ["Sam"]
snake_case__ :Dict = tokenizer(UpperCamelCase ,return_tensors="jax" )
snake_case__ :List[Any] = model.generate(**UpperCamelCase ,**UpperCamelCase )
snake_case__ :Union[str, Any] = "Sam is a great name. It means \"sun\" in Gaelic."
snake_case__ :List[Any] = tokenizer.batch_decode(UpperCamelCase ,**UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
| 57
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Tuple = mock.Mock()
snake_case__ :List[str] = 500
snake_case__ :Any = {}
snake_case__ :Union[str, Any] = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Union[str, Any] = mock.Mock()
snake_case__ :int = 500
snake_case__ :Any = {}
snake_case__ :Dict = HTTPError
snake_case__ :List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ :Union[str, Any] = tempfile.mktemp()
with open(UpperCamelCase ,"wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
finally:
os.remove(UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCAmelCase_ ( cls ) -> Optional[int]:
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def lowerCAmelCase_ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Any = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase )
bert_tokenizer.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
snake_case__ :List[str] = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :int = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Dict = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def lowerCAmelCase_ ( self ) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case__ :Optional[int] = Trie()
snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase ,["AB", "C"] )
| 57
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__UpperCAmelCase : Tuple = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def lowercase_ ( __snake_case : Tuple ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = {}
state_dict.pop("pixel_mean" , __snake_case )
state_dict.pop("pixel_std" , __snake_case )
snake_case__ :int = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case__ :int = key.replace(__snake_case , __snake_case )
if re.match(__snake_case , __snake_case ):
snake_case__ :Optional[Any] = int(re.match(__snake_case , __snake_case ).group(2 ) )
if layer_nb == 0:
snake_case__ :Tuple = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
snake_case__ :List[Any] = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
snake_case__ :int = key.replace("layers.2" , "proj_out" )
snake_case__ :Any = value
snake_case__ :List[Any] = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def lowercase_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any]="ybelkada/segment-anything" ) -> str:
'''simple docstring'''
snake_case__ :int = hf_hub_download(__snake_case , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
snake_case__ :List[Any] = SamConfig()
elif "sam_vit_l" in model_name:
snake_case__ :Any = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case__ :Dict = SamConfig(
vision_config=__snake_case , )
elif "sam_vit_h" in model_name:
snake_case__ :Optional[Any] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case__ :Optional[Any] = SamConfig(
vision_config=__snake_case , )
snake_case__ :Optional[Any] = torch.load(__snake_case , map_location="cpu" )
snake_case__ :List[str] = replace_keys(__snake_case )
snake_case__ :List[str] = SamImageProcessor()
snake_case__ :Any = SamProcessor(image_processor=__snake_case )
snake_case__ :Dict = SamModel(__snake_case )
hf_model.load_state_dict(__snake_case )
snake_case__ :Optional[Any] = hf_model.to("cuda" )
snake_case__ :Dict = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
snake_case__ :List[Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("RGB" )
snake_case__ :Tuple = [[[4_00, 6_50]]]
snake_case__ :Tuple = [[1]]
snake_case__ :str = processor(images=np.array(__snake_case ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ :Tuple = hf_model(**__snake_case )
snake_case__ :Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
snake_case__ :Any = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ :List[Any] = hf_model(**__snake_case )
snake_case__ :str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
snake_case__ :Tuple = ((75, 2_75, 17_25, 8_50),)
snake_case__ :str = processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ :Optional[int] = hf_model(**__snake_case )
snake_case__ :List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
snake_case__ :Dict = [[[4_00, 6_50], [8_00, 6_50]]]
snake_case__ :Optional[Any] = [[1, 1]]
snake_case__ :str = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ :Any = hf_model(**__snake_case )
snake_case__ :List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
__UpperCAmelCase : int = argparse.ArgumentParser()
__UpperCAmelCase : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__UpperCAmelCase : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 57
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case )
snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ :List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ :Any = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
snake_case__ :Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
model.eval()
snake_case__ :Union[str, Any] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ :List[Any] = model(**__snake_case )
snake_case__ :Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case__ , snake_case__ :Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
snake_case__ :int = metric.compute()
return eval_metric["accuracy"]
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ :Union[str, Any] = config["lr"]
snake_case__ :List[str] = int(config["num_epochs"] )
snake_case__ :Optional[Any] = int(config["seed"] )
snake_case__ :List[Any] = int(config["batch_size"] )
snake_case__ :List[Any] = args.model_name_or_path
set_seed(__snake_case )
snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def lowercase_ ( ) -> Any:
'''simple docstring'''
snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
snake_case__ :Any = parser.parse_args()
snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 57
| 1
|
def lowercase_ ( __snake_case : str , __snake_case : str ) -> str:
'''simple docstring'''
snake_case__ :int = len(__snake_case )
snake_case__ :int = len(__snake_case )
snake_case__ :int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
snake_case__ :list = []
for char_count in range(__snake_case ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__snake_case )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 57
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = data
snake_case__ :Node | None = None
snake_case__ :Node | None = None
def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase_ ( __snake_case : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase_ ( __snake_case : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase_ ( ) -> None: # Main function for testing.
'''simple docstring'''
snake_case__ :Dict = Node(1 )
snake_case__ :int = Node(2 )
snake_case__ :Optional[Any] = Node(3 )
snake_case__ :Tuple = Node(4 )
snake_case__ :str = Node(5 )
snake_case__ :Optional[Any] = Node(6 )
snake_case__ :List[Any] = Node(7 )
snake_case__ :List[str] = Node(8 )
snake_case__ :Tuple = Node(9 )
print(is_full_binary_tree(__snake_case ) )
print(depth_of_tree(__snake_case ) )
print("Tree is: " )
display(__snake_case )
if __name__ == "__main__":
main()
| 57
| 1
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCAmelCase : List[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCAmelCase : int = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
__UpperCAmelCase : Any = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
__UpperCAmelCase : str = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
__UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
__UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 57
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self ,UpperCamelCase ,UpperCamelCase=7 ,UpperCamelCase=3 ,UpperCamelCase=18 ,UpperCamelCase=30 ,UpperCamelCase=400 ,UpperCamelCase=True ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase=None ,) -> Optional[Any]:
snake_case__ :Optional[Any] = size if size is not None else {"shortest_edge": 20}
snake_case__ :List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18}
snake_case__ :Union[str, Any] = parent
snake_case__ :Union[str, Any] = batch_size
snake_case__ :Optional[Any] = num_channels
snake_case__ :List[str] = image_size
snake_case__ :List[str] = min_resolution
snake_case__ :Tuple = max_resolution
snake_case__ :Optional[Any] = do_resize
snake_case__ :Any = size
snake_case__ :Dict = do_center_crop
snake_case__ :int = crop_size
def lowerCAmelCase_ ( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _snake_case ( _A , unittest.TestCase ):
_A = MobileNetVaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :List[Any] = MobileNetVaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase ,"do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase ,"size" ) )
self.assertTrue(hasattr(UpperCamelCase ,"do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase ,"crop_size" ) )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
snake_case__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def lowerCAmelCase_ ( self ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case__ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ :int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,Image.Image )
# Test not batched input
snake_case__ :Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
snake_case__ :Union[str, Any] = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,np.ndarray )
# Test not batched input
snake_case__ :Dict = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
snake_case__ :List[Any] = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
snake_case__ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ :Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,torch.Tensor )
# Test not batched input
snake_case__ :Any = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
snake_case__ :List[Any] = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 57
|
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ :Dict = ""
for i in table:
res += inp[i - 1]
return res
def lowercase_ ( __snake_case : List[str] ) -> int:
'''simple docstring'''
return data[1:] + data[0]
def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :Union[str, Any] = ""
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :int = int("0b" + data[0] + data[-1] , 2 )
snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case__ :Tuple = message[:4]
snake_case__ :int = message[4:]
snake_case__ :int = apply_table(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case )
snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741
snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] )
snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741
snake_case__ :int = "0" * (2 - len(__snake_case )) + r
snake_case__ :Optional[Any] = apply_table(l + r , __snake_case )
snake_case__ :Tuple = xor(__snake_case , __snake_case )
return temp + right
if __name__ == "__main__":
__UpperCAmelCase : Dict = input("Enter 10 bit key: ")
__UpperCAmelCase : Tuple = input("Enter 8 bit message: ")
__UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9]
__UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__UpperCAmelCase : Tuple = [2, 4, 3, 1]
__UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
__UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
__UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
__UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__UpperCAmelCase : int = apply_table(key, paa_table)
__UpperCAmelCase : Dict = temp[:5]
__UpperCAmelCase : Optional[int] = temp[5:]
__UpperCAmelCase : Optional[int] = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : int = apply_table(left + right, pa_table)
__UpperCAmelCase : Tuple = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : Dict = left_shift(left)
__UpperCAmelCase : Optional[Any] = left_shift(right)
__UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table)
# encryption
__UpperCAmelCase : Tuple = apply_table(message, IP)
__UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : List[Any] = temp[4:] + temp[:4]
__UpperCAmelCase : int = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__UpperCAmelCase : List[Any] = apply_table(CT, IP)
__UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : int = temp[4:] + temp[:4]
__UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 57
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase : Any = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__UpperCAmelCase : Dict = spec.loader.load_module()
__UpperCAmelCase : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCAmelCase : int = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__UpperCAmelCase : Optional[int] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case__ :Any = False
# source code of `config_class`
snake_case__ :List[str] = inspect.getsource(__snake_case )
snake_case__ :Any = _re_checkpoint.findall(__snake_case )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case__ , snake_case__ :Optional[Any] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case__ :Dict = F'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
snake_case__ :Dict = True
break
snake_case__ :Any = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__snake_case )
if len(__snake_case ) > 0:
snake_case__ :int = "\n".join(sorted(__snake_case ) )
raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 57
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
@register_to_config
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
super().__init__()
snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :Any = False
snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
snake_case__ :Tuple = TaConfig(
vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
snake_case__ :List[str] = nn.ModuleList()
for lyr_num in range(UpperCamelCase ):
snake_case__ :List[Any] = TaBlock(UpperCamelCase )
self.encoders.append(UpperCamelCase )
snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
snake_case__ :Any = nn.Dropout(p=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :str = self.token_embedder(UpperCamelCase )
snake_case__ :int = encoder_input_tokens.shape[1]
snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase )
snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
# inverted the attention mask
snake_case__ :Optional[Any] = encoder_input_tokens.size()
snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
for lyr in self.encoders:
snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
| 57
| 1
|
def lowercase_ ( __snake_case : int = 10_00 ) -> int:
'''simple docstring'''
snake_case__ :int = 3
snake_case__ :int = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
|
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"]
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[Any] = start
# add current to visited
visited.append(__snake_case )
snake_case__ :List[str] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if all vertices haven't been visited select a new one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
if __name__ == "__main__":
__UpperCAmelCase : Tuple = topological_sort("a", [], [])
print(sort)
| 57
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case )
snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ :List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ :Any = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
snake_case__ :Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
model.eval()
snake_case__ :Union[str, Any] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ :List[Any] = model(**__snake_case )
snake_case__ :Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case__ , snake_case__ :Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
snake_case__ :int = metric.compute()
return eval_metric["accuracy"]
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ :Union[str, Any] = config["lr"]
snake_case__ :List[str] = int(config["num_epochs"] )
snake_case__ :Optional[Any] = int(config["seed"] )
snake_case__ :List[Any] = int(config["batch_size"] )
snake_case__ :List[Any] = args.model_name_or_path
set_seed(__snake_case )
snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def lowercase_ ( ) -> Any:
'''simple docstring'''
snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
snake_case__ :Any = parser.parse_args()
snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 57
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 1
|
from PIL import Image
def lowercase_ ( __snake_case : Image , __snake_case : int ) -> Image:
'''simple docstring'''
snake_case__ :int = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
def contrast(__snake_case : int ) -> int:
return int(1_28 + factor * (c - 1_28) )
return img.point(__snake_case )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
__UpperCAmelCase : Dict = change_contrast(img, 1_7_0)
cont_img.save("image_data/lena_high_contrast.png", format="png")
| 57
|
def lowercase_ ( __snake_case : list ) -> list:
'''simple docstring'''
if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(__snake_case ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : int = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : str = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_1_2,
"facebook/dpr-ctx_encoder-multiset-base": 5_1_2,
}
__UpperCAmelCase : List[str] = {
"facebook/dpr-question_encoder-single-nq-base": 5_1_2,
"facebook/dpr-question_encoder-multiset-base": 5_1_2,
}
__UpperCAmelCase : List[str] = {
"facebook/dpr-reader-single-nq-base": 5_1_2,
"facebook/dpr-reader-multiset-base": 5_1_2,
}
__UpperCAmelCase : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__UpperCAmelCase : Optional[int] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__UpperCAmelCase : List[Any] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_A = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_A = DPRContextEncoderTokenizer
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_A = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_A = DPRQuestionEncoderTokenizer
__UpperCAmelCase : Tuple = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__UpperCAmelCase : Dict = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__UpperCAmelCase : Any = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_A )
class _snake_case :
def __call__( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = False ,UpperCamelCase = False ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,**UpperCamelCase ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
UpperCamelCase ,padding=UpperCamelCase ,truncation=UpperCamelCase ,max_length=UpperCamelCase ,return_tensors=UpperCamelCase ,return_attention_mask=UpperCamelCase ,**UpperCamelCase ,)
elif titles is None or texts is None:
snake_case__ :Tuple = titles if texts is None else texts
return super().__call__(
UpperCamelCase ,UpperCamelCase ,padding=UpperCamelCase ,truncation=UpperCamelCase ,max_length=UpperCamelCase ,return_tensors=UpperCamelCase ,return_attention_mask=UpperCamelCase ,**UpperCamelCase ,)
snake_case__ :List[str] = titles if not isinstance(UpperCamelCase ,UpperCamelCase ) else [titles]
snake_case__ :List[Any] = texts if not isinstance(UpperCamelCase ,UpperCamelCase ) else [texts]
snake_case__ :Optional[Any] = len(UpperCamelCase )
snake_case__ :Tuple = questions if not isinstance(UpperCamelCase ,UpperCamelCase ) else [questions] * n_passages
assert len(UpperCamelCase ) == len(
UpperCamelCase ), f'There should be as many titles than texts but got {len(UpperCamelCase )} titles and {len(UpperCamelCase )} texts.'
snake_case__ :Dict = super().__call__(UpperCamelCase ,UpperCamelCase ,padding=UpperCamelCase ,truncation=UpperCamelCase )["input_ids"]
snake_case__ :int = super().__call__(UpperCamelCase ,add_special_tokens=UpperCamelCase ,padding=UpperCamelCase ,truncation=UpperCamelCase )["input_ids"]
snake_case__ :Tuple = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCamelCase ,UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ :Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ :int = attention_mask
return self.pad(UpperCamelCase ,padding=UpperCamelCase ,max_length=UpperCamelCase ,return_tensors=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 16 ,UpperCamelCase = 64 ,UpperCamelCase = 4 ,) -> List[DPRSpanPrediction]:
snake_case__ :Dict = reader_input["input_ids"]
snake_case__ , snake_case__ , snake_case__ :Union[str, Any] = reader_output[:3]
snake_case__ :Dict = len(UpperCamelCase )
snake_case__ :List[Any] = sorted(range(UpperCamelCase ) ,reverse=UpperCamelCase ,key=relevance_logits.__getitem__ )
snake_case__ :List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ :Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ :List[Any] = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ :List[Any] = sequence_ids.index(self.pad_token_id )
else:
snake_case__ :Tuple = len(UpperCamelCase )
snake_case__ :str = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=UpperCamelCase ,top_spans=UpperCamelCase ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=UpperCamelCase ,start_index=UpperCamelCase ,end_index=UpperCamelCase ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> List[DPRSpanPrediction]:
snake_case__ :Any = []
for start_index, start_score in enumerate(UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case__ :str = sorted(UpperCamelCase ,key=lambda UpperCamelCase : x[1] ,reverse=UpperCamelCase )
snake_case__ :List[str] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
snake_case__ :str = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_A )
class _snake_case ( _A , _A ):
_A = VOCAB_FILES_NAMES
_A = READER_PRETRAINED_VOCAB_FILES_MAP
_A = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = READER_PRETRAINED_INIT_CONFIGURATION
_A = ['input_ids', 'attention_mask']
_A = DPRReaderTokenizer
| 57
|
from __future__ import annotations
def lowercase_ ( __snake_case : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(__snake_case ) / len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.