code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    """Fast tokenizer whose matching slow implementation is :class:`CustomTokenizer`.

    Behaviour is inherited unchanged from ``BertTokenizerFast``; only the
    slow-tokenizer pairing is overridden so ``AutoTokenizer`` round-trips work.
    """

    # Links this fast tokenizer to its slow counterpart for save/convert round-trips.
    slow_tokenizer_class = CustomTokenizer
    pass
81
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    Reproduces the SentencePiece defaults: NMT/NFKC normalization, whitespace
    collapsing, lowercasing, metaspace pre-tokenization with digit splitting,
    and an ``$A </s>`` post-processing template. Special token ids are fixed:
    pad=0, eos=1, unk=2.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        # Fixed id assignment mirrors the order expected by `special_tokens_list`.
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        # Flat list indexed by token id, consumed by the Unigram trainer.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                # Collapse runs of 2+ spaces into one, matching SentencePiece.
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        # Append EOS to every single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the Unigram `unk_id` matches our fixed unk id.

        The trainer does not set `unk_id` itself, so we round-trip through the
        JSON representation to inject it.
        """
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
60
0
"""simple docstring""" import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ): UpperCAmelCase_ = None if token is not None: UpperCAmelCase_ = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""} UpperCAmelCase_ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" UpperCAmelCase_ = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json() UpperCAmelCase_ = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) UpperCAmelCase_ = math.ceil((result["total_count"] - 100) / 100 ) for i in range(lowerCAmelCase__ ): UpperCAmelCase_ = requests.get(url + f"""&page={i + 2}""" , headers=lowerCAmelCase__ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ): UpperCAmelCase_ = None if token is not None: UpperCAmelCase_ = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""} UpperCAmelCase_ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" UpperCAmelCase_ = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json() UpperCAmelCase_ = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) UpperCAmelCase_ = math.ceil((result["total_count"] - 100) / 100 ) for i in range(lowerCAmelCase__ ): UpperCAmelCase_ = requests.get(url + f"""&page={i + 2}""" , headers=lowerCAmelCase__ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(f"""Unknown error, could not 
fetch links:\n{traceback.format_exc()}""" ) return {} def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = None if token is not None: UpperCAmelCase_ = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""} UpperCAmelCase_ = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ ) UpperCAmelCase_ = result.headers["Location"] UpperCAmelCase_ = requests.get(lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ ) UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , f"""{artifact_name}.zip""" ) with open(lowerCAmelCase__ , "wb" ) as fp: fp.write(response.content ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ): UpperCAmelCase_ = [] UpperCAmelCase_ = [] UpperCAmelCase_ = None with zipfile.ZipFile(lowerCAmelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCAmelCase__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(lowerCAmelCase__ ) as f: for line in f: UpperCAmelCase_ = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs UpperCAmelCase_ = line[: line.index(": " )] UpperCAmelCase_ = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed UpperCAmelCase_ = line[len("FAILED " ) :] failed_tests.append(lowerCAmelCase__ ) elif filename == "job_name.txt": UpperCAmelCase_ = line if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( f"""`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCAmelCase__ )} for `errors` """ f"""and {len(lowerCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" " problem." 
) UpperCAmelCase_ = None if job_name and job_links: UpperCAmelCase_ = job_links.get(lowerCAmelCase__ , lowerCAmelCase__ ) # A list with elements of the form (line of error, error, failed test) UpperCAmelCase_ = [x + [y] + [job_link] for x, y in zip(lowerCAmelCase__ , lowerCAmelCase__ )] return result def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ): UpperCAmelCase_ = [] UpperCAmelCase_ = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for p in os.listdir(lowerCAmelCase__ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(lowerCAmelCase__ , job_links=lowerCAmelCase__ ) ) return errors def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ): UpperCAmelCase_ = Counter() counter.update([x[1] for x in logs] ) UpperCAmelCase_ = counter.most_common() UpperCAmelCase_ = {} for error, count in counts: if error_filter is None or error not in error_filter: UpperCAmelCase_ = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} UpperCAmelCase_ = dict(sorted(r.items() , key=lambda lowerCAmelCase__ : item[1]["count"] , reverse=lowerCAmelCase__ ) ) return r def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = test.split("::" )[0] if test.startswith("tests/models/" ): UpperCAmelCase_ = test.split("/" )[2] else: UpperCAmelCase_ = None return test def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ): UpperCAmelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs] UpperCAmelCase_ = [x for x in logs if x[2] is not None] UpperCAmelCase_ = {x[2] for x in logs} UpperCAmelCase_ = {} for test in tests: UpperCAmelCase_ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) UpperCAmelCase_ = counter.most_common() UpperCAmelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} UpperCAmelCase_ = sum(error_counts.values() ) if n_errors > 0: UpperCAmelCase_ = {"count": n_errors, "errors": error_counts} UpperCAmelCase_ = dict(sorted(r.items() , 
key=lambda lowerCAmelCase__ : item[1]["count"] , reverse=lowerCAmelCase__ ) ) return r def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = "| no. | error | status |" UpperCAmelCase_ = "|-:|:-|:-|" UpperCAmelCase_ = [header, sep] for error in reduced_by_error: UpperCAmelCase_ = reduced_by_error[error]["count"] UpperCAmelCase_ = f"""| {count} | {error[:100]} | |""" lines.append(lowerCAmelCase__ ) return "\n".join(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = "| model | no. of errors | major error | count |" UpperCAmelCase_ = "|-:|-:|-:|-:|" UpperCAmelCase_ = [header, sep] for model in reduced_by_model: UpperCAmelCase_ = reduced_by_model[model]["count"] UpperCAmelCase_ , UpperCAmelCase_ = list(reduced_by_model[model]["errors"].items() )[0] UpperCAmelCase_ = f"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(lowerCAmelCase__ ) return "\n".join(lowerCAmelCase__ ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") lowerCamelCase = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) lowerCamelCase = get_job_links(args.workflow_run_id, token=args.token) lowerCamelCase = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: lowerCamelCase = k.find(""" / """) lowerCamelCase = k[index + len(""" / """) :] lowerCamelCase = v with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) lowerCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) lowerCamelCase = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error lowerCamelCase = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors lowerCamelCase = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) lowerCamelCase = reduce_by_error(errors) lowerCamelCase = reduce_by_model(errors) lowerCamelCase = make_github_table(reduced_by_error) lowerCamelCase = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp: fp.write(sa) with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp: fp.write(sa)
82
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    """Return True iff `graph` (adjacency-list dict keyed 0..n-1) is bipartite.

    Uses DFS 2-coloring: color each component starting at 0, then verify no
    edge joins two vertices of the same color. Handles disconnected graphs
    and isolated vertices (a vertex with no edges is trivially colorable).
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(vertex, c):
        # Assign color `c` to `vertex`, and the opposite color to its neighbors.
        visited[vertex] = True
        color[vertex] = c
        for neighbor in graph[vertex]:
            if not visited[neighbor]:
                dfs(neighbor, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # An edge between same-colored vertices means the graph is not bipartite.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
60
0
"""simple docstring""" from collections import deque class __snake_case : def __init__( self : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : List[Any] = process_name # process name _lowerCamelCase : List[Any] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time _lowerCamelCase : Optional[int] = arrival_time _lowerCamelCase : int = burst_time # remaining burst time _lowerCamelCase : Dict = 0 # total time of the process wait in ready queue _lowerCamelCase : Tuple = 0 # time from arrival time to completion time class __snake_case : def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ): """simple docstring""" _lowerCamelCase : Any = number_of_queues # time slice of queues that round robin algorithm applied _lowerCamelCase : Any = time_slices # unfinished process is in this ready_queue _lowerCamelCase : Dict = queue # current time _lowerCamelCase : List[Any] = current_time # finished process is in this sequence queue _lowerCamelCase : deque[Process] = deque() def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : list[Process] ): """simple docstring""" _lowerCamelCase : int = [] for i in range(len(__lowerCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : list[Process] ): """simple docstring""" _lowerCamelCase : str = [] for i in range(len(__lowerCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : list[Process] ): """simple docstring""" 
_lowerCamelCase : Union[str, Any] = [] for i in range(len(__lowerCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : deque[Process] ): """simple docstring""" return [q.burst_time for q in queue] def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Process ): """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : deque[Process] ): """simple docstring""" _lowerCamelCase : deque[Process] = deque() # sequence deque of finished process while len(__lowerCAmelCase ) != 0: _lowerCamelCase : str = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 _lowerCamelCase : List[Any] = 0 # set the process's turnaround time because it is finished _lowerCamelCase : Tuple = self.current_time - cp.arrival_time # set the completion time _lowerCamelCase : Optional[Any] = self.current_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCAmelCase ) ): _lowerCamelCase : Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, 
update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__lowerCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time _lowerCamelCase : str = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished _lowerCamelCase : int = 0 # set the finish time _lowerCamelCase : Optional[int] = self.current_time # update the process' turnaround time because it is finished _lowerCamelCase : int = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" for i in range(self.number_of_queues - 1 ): _lowerCamelCase , _lowerCamelCase : Tuple = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase__ = Process('''P1''', 0, 53) lowerCAmelCase__ = Process('''P2''', 0, 17) lowerCAmelCase__ = Process('''P3''', 0, 68) lowerCAmelCase__ = Process('''P4''', 0, 24) lowerCAmelCase__ = 3 lowerCAmelCase__ = [17, 25] lowerCAmelCase__ = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase__ = Process('''P1''', 0, 
53) lowerCAmelCase__ = Process('''P2''', 0, 17) lowerCAmelCase__ = Process('''P3''', 0, 68) lowerCAmelCase__ = Process('''P4''', 0, 24) lowerCAmelCase__ = 3 lowerCAmelCase__ = [17, 25] lowerCAmelCase__ = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase__ = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase__ = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( F"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( F"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( F"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
83
import unittest

import numpy as np

from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to instantiate `BeitImageProcessor` in the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Return one (image, segmentation map) pair from the ADE20k test fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    """Return a batch of two (image, segmentation map) pairs from the ADE20k test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
60
0
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A_ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=False , snake_case=True , snake_case=False , snake_case=True , snake_case=33 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None lowercase = None lowercase = None if self.use_labels: lowercase = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self ): return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = EsmModel(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case ) lowercase = model(snake_case ) lowercase = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = EsmForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = EsmForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() lowercase = 
model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) = config_and_inputs lowercase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : str = False _UpperCamelCase : Dict = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _UpperCamelCase : Any = () _UpperCamelCase : Optional[Any] = ( { """feature-extraction""": EsmModel, """fill-mask""": EsmForMaskedLM, """text-classification""": EsmForSequenceClassification, """token-classification""": EsmForTokenClassification, """zero-shot""": EsmForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Union[str, Any] = True def SCREAMING_SNAKE_CASE__ ( self ): lowercase = EsmModelTester(self ) lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase = type self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = EsmModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs()[0] lowercase = EsmEmbeddings(config=snake_case ) lowercase = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase = create_position_ids_from_input_ids(snake_case , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(snake_case , snake_case ) ) ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs()[0] lowercase = EsmEmbeddings(config=snake_case ) lowercase = torch.empty(2 , 4 , 30 ) lowercase = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase = embeddings.create_position_ids_from_inputs_embeds(snake_case ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(snake_case , snake_case ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def SCREAMING_SNAKE_CASE__ ( self ): pass @unittest.skip('Esm does not support embedding resizing' ) def SCREAMING_SNAKE_CASE__ ( self ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def SCREAMING_SNAKE_CASE__ ( self ): pass @require_torch class A_ ( __lowerCamelCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self ): with torch.no_grad(): lowercase = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase = model(snake_case )[0] lowercase = 33 lowercase = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , snake_case ) lowercase = torch.tensor( [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): with torch.no_grad(): lowercase = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase = model(snake_case )[0] # compare the actual values for a slice. lowercase = torch.tensor( [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1E-4 ) )
84
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. 
Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. ])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any: '''simple docstring''' snake_case_ : List[Any] = mean_squared_error( 
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ ) return {"mse": mse}
60
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class __lowerCAmelCase : lowerCamelCase_ : Any = None def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __magic_name__ ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(__magic_name__ ) snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Tuple = self.feature_extraction_class() self.assertIsNotNone(__magic_name__ )
60
0
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Any , UpperCAmelCase : int = 768 , ): super().__init__() A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) ) A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) ) def __A ( self : Optional[Any] , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ): A_ = nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) ) A_ = nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) ) return self def __A ( self : Any , UpperCAmelCase : int ): A_ = (embeds - self.mean) * 1.0 / self.std return embeds def __A ( self : Dict , UpperCAmelCase : int ): A_ = (embeds * self.std) + self.mean return embeds
86
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase : lowerCamelCase_ : str lowerCamelCase_ : str = None @staticmethod def lowerCamelCase () -> Any: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' if not self.is_available(): raise RuntimeError( F'''You picked the {self.name} backend, but it is not installed. 
Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase (cls ) -> List[Any]: '''simple docstring''' return F'''`pip install {cls.pip_package or cls.name}`''' class __lowerCAmelCase ( _a ): lowerCamelCase_ : Optional[int] = '''optuna''' @staticmethod def lowerCamelCase () -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return default_hp_space_optuna(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Any = '''ray''' lowerCamelCase_ : List[str] = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase () -> List[Any]: '''simple docstring''' return is_ray_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return default_hp_space_ray(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''sigopt''' @staticmethod def lowerCamelCase () -> Optional[int]: '''simple docstring''' return is_sigopt_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]: '''simple docstring''' return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' return default_hp_space_sigopt(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''wandb''' @staticmethod def lowerCamelCase () -> Dict: '''simple docstring''' return is_wandb_available() 
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]: '''simple docstring''' return default_hp_space_wandb(__magic_name__ ) lowerCAmelCase_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_UpperCamelCase ) > 0: snake_case_ : Dict = available_backends[0].name if len(_UpperCamelCase ) > 1: logger.info( f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( '''No hyperparameter search backend available.\n''' + '''\n'''.join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
60
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : Union[str, Any] = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = ["""ConvNextFeatureExtractor"""] _lowerCamelCase : int = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Any = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, 
TFConvNextModel, TFConvNextPreTrainedModel else: import sys _lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
87
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list: """simple docstring""" snake_case_ : Tuple = len(_UpperCamelCase ) snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): snake_case_ : Any = y_points[i] for i in range(2 , _UpperCamelCase ): for j in range(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Optional[int] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
60
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols, consumed lazily by _LazyModule below so
# the heavy torch-backed modules are only imported on first attribute access.
# (Previously this dict was bound to a throwaway name, then overwritten by the
# modeling list, and _LazyModule referenced the undefined `_import_structure`.)
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply do not advertise the modeling classes.
    pass
else:
    # Extend (do not replace) the import map with the torch-only symbols.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves the
    # names declared in _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
60
0
import unittest

from knapsack import greedy_knapsack as kp


class _lowerCamelCase(unittest.TestCase):
    """Unit tests for ``greedy_knapsack.calc_profit``.

    NOTE(review): previously every method shared the name ``UpperCamelCase``
    (so only the last definition survived and none matched unittest's
    ``test_*`` discovery pattern) and the expected exception class was the
    undefined name ``lowerCamelCase``; both were restored here.
    """

    def test_sorted(self):
        """A known profit/weight instance yields the known optimal profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative knapsack capacity must be rejected."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """A negative weight entry must be rejected."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """A negative profit entry must be rejected."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """A zero capacity must be rejected."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Profit and weight lists of different lengths must be rejected."""
        self.assertRaisesRegex(ValueError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
89
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    """Describe an ``obj[k]`` read as an (operation, *args) tuple."""
    return getitem, k


def _set(k, v):
    """Describe an ``obj[k] = v`` write as an (operation, *args) tuple."""
    return setitem, k, v


def _del(k):
    """Describe a ``del obj[k]`` as an (operation, *args) tuple."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply *fun* to *obj*; return ``(result, None)`` on success or
    ``(None, exception)`` when the operation raises."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


# Operation scripts replayed against both HashMap and a builtin dict.
# (Previously all six were bound to the same throwaway name, so the
# parametrize call below referenced undefined symbols.)
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay the same operation script against a HashMap and a builtin dict
    and check the two stay observably identical after every step."""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        # Run the identical operation on both containers; compare outcomes.
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    """HashMap must not expose public names beyond the builtin dict API."""

    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
60
0
"""Lazy import structure for the (deprecated) M-CTC-T model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols, consumed lazily by _LazyModule below.
# (Previously the dict was bound to a throwaway name and _LazyModule
# referenced the undefined `_import_structure`, raising NameError on import.)
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply do not advertise the modeling classes.
    pass
else:
    # Extend (do not replace) the import map with the torch-only symbols.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves the
    # names declared in _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
90
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with bucket sort and return a new sorted list.

    Each value lands in bucket ``int(value - min)``; buckets are then sorted
    individually and concatenated.

    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> bucket_sort([])
    []
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for value in my_list:
        # Previously the whole input list was appended here instead of the
        # element, which corrupted the buckets.
        buckets[int(value - min_value)].append(value)
    return [v for bucket in buckets for v in sorted(bucket)]


# Backward-compatible alias for the previous (generated) function name.
lowerCamelCase_ = bucket_sort

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
60
0
"""simple docstring""" def _snake_case ( snake_case__ : int ): assert ( isinstance(snake_case__ , snake_case__ ) and number_of_steps > 0 ), F'number_of_steps needs to be positive integer, your input {number_of_steps}' if number_of_steps == 1: return 1 A , A = 1, 1 for _ in range(number_of_steps - 1 ): A , A = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
91
import tensorflow as tf

from ...tf_utils import shape_list


class __lowerCAmelCase(tf.keras.layers.Layer):
    """Adaptive softmax output layer with a short-list head and clustered
    tail vocabularies, splitting the vocabulary at ``cutoffs``.

    NOTE(review): previously all four methods shared the name
    ``lowerCamelCase`` (so only the last definition survived and Keras never
    invoked ``build``/``call``), parameters were duplicated placeholders, and
    several locals (``d_emb_i``, ``l_idx``, ``r_idx`` ...) were undefined.
    The conventional names were restored here.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        # Append the full vocab size so the last cluster is implicit.
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_projs = []
        self.out_layers = []

    def build(self, input_shape):
        """Create the projection and embedding weights for each cluster."""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    # No projection needed when the dimensions already match.
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Embedding size shrinks geometrically for rarer clusters.
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        """Optionally project ``x`` and return logits ``x @ W^T + b``."""
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """Pick, for each row, the log-probability of its target token."""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            # Plain (non-adaptive) softmax over the whole vocabulary.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    # Head cluster: short-list tokens plus one logit per tail cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter each token's NLL back to its original position.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
60
0
"""Vegetation-index calculator over spectral band matrices."""
# Imports
import numpy as np


class __SCREAMING_SNAKE_CASE:
    """Compute common vegetation indices from band matrices (red, green,
    blue, red-edge and near-infrared).

    NOTE(review): previously ``set_matricies`` bound every band to a single
    throwaway local instead of ``self.*`` (so no method could ever find its
    inputs) and the dispatch dict plus several locals used undefined names;
    the attribute/local names the method bodies rely on were restored.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store any band that was provided; leave the others untouched."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch *index* (e.g. ``"NDVI"``) to the matching method.

        Returns the computed matrix, or False when the index is unknown.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        """Compute the ARVI2 index."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Compute the CCCI index (red-edge NDVI over NDVI)."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Compute the CVI index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Compute the GLI index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Compute the NDVI index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Compute the BNDVI index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Compute the red-edge NDVI index."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Compute the GNDVI index."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Compute the GBNDVI index."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Compute the GRNDVI index."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Compute the RBNDVI index."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Compute the PNDVI index."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Compute the ATSAVI index with soil-line coefficients a, b, x."""
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Compute the BWDRVI index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Compute the CIgreen index."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Compute the CIrededge index."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Compute the CI index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Compute the CTVI index (derived from NDVI)."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Compute the GDVI index."""
        return self.nir - self.green

    def evi(self):
        """Compute the EVI index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Compute the GEMI index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Compute the GOSAVI index with soil adjustment y."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Compute the GSAVI index with soil adjustment n."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Compute the Hue index."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Compute the IVI index with intercept b and slope a."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Compute the IPVI index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Compute the I (intensity) index."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Compute the RVI index."""
        return self.nir / self.red

    def mrvi(self):
        """Compute the MRVI index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Compute the MSAVI index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Compute the NormG index."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Compute the NormNIR index."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Compute the NormR index."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Compute the NGRDI index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Compute the RI index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Compute the S (saturation) index."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Compute the IF index."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Compute the DVI index."""
        return self.nir / self.red

    def tvi(self):
        """Compute the TVI index (derived from NDVI)."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Compute the NDRE index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
92
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST *message_body* as a Slack message to the *slack_url* webhook.

    Raises ValueError when Slack answers with a non-200 status code.
    (Previously both parameters shared the same placeholder name, which is a
    SyntaxError, and the __main__ guard called an undefined name.)
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


# Backward-compatible alias for the previous (generated) function name.
lowerCamelCase_ = send_slack_message

if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
60
0
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[str, Any] = (EulerDiscreteScheduler,) __magic_name__ :Any = 10 def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = { 'num_train_timesteps': 1_1_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**__UpperCAmelCase ) return config def snake_case ( self ): '''simple docstring''' for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.scheduler_classes[0] lowerCAmelCase__ :int = self.get_scheduler_config() lowerCAmelCase__ :Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ :str = torch.manual_seed(0 ) lowerCAmelCase__ :Tuple = self.dummy_model() lowerCAmelCase__ :Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ :List[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ :Tuple = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Dict = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Any = 
scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) lowerCAmelCase__ :Any = output.prev_sample lowerCAmelCase__ :int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :Union[str, Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 10.08_07 ) < 1E-2 assert abs(result_mean.item() - 0.01_31 ) < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.scheduler_classes[0] lowerCAmelCase__ :Any = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase__ :int = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ :int = torch.manual_seed(0 ) lowerCAmelCase__ :int = self.dummy_model() lowerCAmelCase__ :Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ :Any = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ :List[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) lowerCAmelCase__ :Any = output.prev_sample lowerCAmelCase__ :Tuple = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :Tuple = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 0.00_02 ) < 1E-2 assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = self.scheduler_classes[0] lowerCAmelCase__ :Tuple = self.get_scheduler_config() lowerCAmelCase__ :Dict = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ :int = torch.manual_seed(0 ) lowerCAmelCase__ :List[Any] = self.dummy_model() lowerCAmelCase__ :int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() 
lowerCAmelCase__ :Dict = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ :Optional[int] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = output.prev_sample lowerCAmelCase__ :Optional[int] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :List[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 10.08_07 ) < 1E-2 assert abs(result_mean.item() - 0.01_31 ) < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase__ :str = self.get_scheduler_config() lowerCAmelCase__ :Optional[Any] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase__ :str = self.dummy_model() lowerCAmelCase__ :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCAmelCase__ :Tuple = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ :Any = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) lowerCAmelCase__ :Dict = output.prev_sample lowerCAmelCase__ :str = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :str = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2 assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
93
# Lazy-import scaffolding for the Speech2Text model family: heavy submodules
# (tokenizer, feature extractor, TF/PyTorch models) are only exposed when the
# corresponding optional dependency (sentencepiece / torchaudio / tf / torch)
# is installed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Base import map: config and processor are always importable.
# NOTE(review): the assignments below keep rebinding this same name
# ``lowerCAmelCase_`` instead of adding keys to the dict, and the final
# ``_LazyModule`` call references an undefined ``_import_structure`` --
# presumably all of these were once ``_import_structure[...] = ...``.
# Confirm against the upstream module before relying on lazy imports here.
lowerCAmelCase_ = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # tokenizer requires sentencepiece; silently omit it when missing
else:
    lowerCAmelCase_ = ['''Speech2TextTokenizer''']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # feature extractor requires the speech extras
else:
    lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # TensorFlow models only when tf is installed
else:
    lowerCAmelCase_ = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # PyTorch models only when torch is installed
else:
    lowerCAmelCase_ = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports directly.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # At runtime the module object is replaced with a lazy proxy that imports
    # submodules on first attribute access.
    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
0
'''simple docstring''' import re from filelock import FileLock try: import nltk SCREAMING_SNAKE_CASE = True except (ImportError, ModuleNotFoundError): SCREAMING_SNAKE_CASE = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowercase_ ( __A : str ) -> str: """simple docstring""" re.sub('''<n>''' , '''''' , __A ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__A ) )
94
# OwlViT configuration classes (text encoder, vision encoder, composite
# model, and ONNX export config).
#
# NOTE(review): this block is a mechanically renamed copy of the upstream
# module and is broken as written: every ``__init__`` repeats the single
# parameter name ``__magic_name__`` (a SyntaxError), bodies assign to a local
# ``snake_case_`` while reading the *intended* parameter/attribute names
# (``vocab_size``, ``config_dict``, ``output``, ...), and ``logger``,
# ``OwlViTTextConfig`` / ``OwlViTVisionConfig`` are never bound under those
# names. Restore the original names before use.
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# Presumably named ``logger`` upstream -- the ``logger.warning``/``logger.info``
# calls below do not resolve to this binding; confirm against upstream.
lowerCAmelCase_ = logging.get_logger(__name__)

# Map of released OwlViT checkpoints to their hosted config files.
lowerCAmelCase_ = {
    '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
    '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
    '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}


class __lowerCAmelCase ( _a ):
    """Configuration for the OwlViT text encoder (``owlvit_text_model``)."""

    # model_type identifier used by the auto-config machinery
    lowerCamelCase_ : Tuple = '''owlvit_text_model'''

    # NOTE(review): parameters are, in order, presumably vocab_size,
    # hidden_size, intermediate_size, num_hidden_layers, num_attention_heads,
    # max_position_embeddings, hidden_act, layer_norm_eps, attention_dropout,
    # initializer_range, initializer_factor, pad/bos/eos token ids -- all
    # collapsed to one repeated name here.
    def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
        '''Store the text-encoder hyperparameters and forward token ids to the base config.'''
        super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
        # NOTE(review): these were presumably ``self.<name> = <name>`` upstream;
        # as written they bind an unused local and read unbound names.
        snake_case_ : int = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : List[Any] = intermediate_size
        snake_case_ : str = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Optional[Any] = max_position_embeddings
        snake_case_ : str = hidden_act
        snake_case_ : Union[str, Any] = layer_norm_eps
        snake_case_ : Dict = attention_dropout
        snake_case_ : Union[str, Any] = initializer_range
        snake_case_ : int = initializer_factor

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
        '''Load this config from a checkpoint, unwrapping the ``text_config`` sub-dict when given a composite OwlViT config.'''
        cls._set_token_in_kwargs(__magic_name__ )
        snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            snake_case_ : str = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__magic_name__ , **__magic_name__ )


class __lowerCAmelCase ( _a ):
    """Configuration for the OwlViT vision encoder (``owlvit_vision_model``)."""

    lowerCamelCase_ : int = '''owlvit_vision_model'''

    # NOTE(review): parameters presumably hidden_size, intermediate_size,
    # num_hidden_layers, num_attention_heads, num_channels, image_size,
    # patch_size, hidden_act, layer_norm_eps, attention_dropout,
    # initializer_range, initializer_factor.
    def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
        '''Store the vision-encoder hyperparameters.'''
        super().__init__(**__magic_name__ )
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : Union[str, Any] = intermediate_size
        snake_case_ : Union[str, Any] = num_hidden_layers
        snake_case_ : Tuple = num_attention_heads
        snake_case_ : List[Any] = num_channels
        snake_case_ : Union[str, Any] = image_size
        snake_case_ : Dict = patch_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : Tuple = layer_norm_eps
        snake_case_ : Dict = attention_dropout
        snake_case_ : List[str] = initializer_range
        snake_case_ : List[Any] = initializer_factor

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
        '''Load this config from a checkpoint, unwrapping the ``vision_config`` sub-dict when given a composite OwlViT config.'''
        cls._set_token_in_kwargs(__magic_name__ )
        snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            snake_case_ : str = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__magic_name__ , **__magic_name__ )


class __lowerCAmelCase ( _a ):
    """Composite OwlViT configuration holding a text config, a vision config, and the projection/logit-scale hyperparameters."""

    lowerCamelCase_ : int = '''owlvit'''
    # is_composition flag, presumably
    lowerCamelCase_ : Optional[int] = True

    # NOTE(review): parameters presumably text_config, vision_config,
    # projection_dim, logit_scale_init_value, return_dict.
    def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
        '''Build the composite config, defaulting both sub-configs to empty dicts.'''
        super().__init__(**__magic_name__ )
        if text_config is None:
            snake_case_ : Tuple = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
        if vision_config is None:
            snake_case_ : str = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
        # NOTE(review): ``OwlViTTextConfig``/``OwlViTVisionConfig`` are not
        # bound under these names in this file (classes above were renamed).
        snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
        snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
        snake_case_ : Any = projection_dim
        snake_case_ : Union[str, Any] = logit_scale_init_value
        snake_case_ : str = return_dict
        snake_case_ : Any = 1.0

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
        '''Load the composite config from a checkpoint, warning on a model_type mismatch.'''
        cls._set_token_in_kwargs(__magic_name__ )
        snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__magic_name__ , **__magic_name__ )

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
        '''Alternate constructor: assemble the composite config from separate text and vision config dicts.'''
        snake_case_ : Optional[int] = {}
        snake_case_ : Union[str, Any] = text_config
        snake_case_ : Optional[Any] = vision_config
        return cls.from_dict(__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self ) -> str:
        '''Serialize to a plain dict, expanding both sub-configs and recording the model type.'''
        snake_case_ : Dict = copy.deepcopy(self.__dict__ )
        snake_case_ : List[Any] = self.text_config.to_dict()
        snake_case_ : List[Any] = self.vision_config.to_dict()
        snake_case_ : Tuple = self.__class__.model_type
        return output


class __lowerCAmelCase ( _a ):
    """ONNX export configuration for OwlViT: declares dynamic input/output axes and dummy-input generation."""

    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic axes for the model inputs (text ids/mask and image pixels).'''
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic axes for the model outputs (similarity logits and embeddings).'''
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ] )

    @property
    def lowerCamelCase (self ) -> float:
        '''Absolute tolerance used when validating exported outputs.'''
        return 1e-4

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
        '''Generate dummy text and image inputs via the processor and merge them into one feed dict.'''
        # NOTE(review): ``processor``/``text_input_dict``/``image_input_dict``
        # are the pre-rename names of the collapsed parameters/locals here.
        snake_case_ : Dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
        snake_case_ : List[str] = super().generate_dummy_inputs(
            processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
        return {**text_input_dict, **image_input_dict}

    @property
    def lowerCamelCase (self ) -> int:
        '''Minimum ONNX opset required by the export.'''
        return 14
60
0
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class UpperCamelCase_ (yaml.SafeLoader ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Any ) -> int: UpperCAmelCase_ : int = [self.constructed_objects[key_node] for key_node, _ in node.value] UpperCAmelCase_ : Union[str, Any] = [tuple(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else key for key in keys] UpperCAmelCase_ : List[Any] = Counter(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=False ) -> Any: UpperCAmelCase_ : int = super().construct_mapping(lowerCAmelCase_ , deep=lowerCAmelCase_ ) self._check_no_duplicates_on_constructed_node(lowerCAmelCase_ ) return mapping def snake_case ( A__ ): UpperCAmelCase_ : int = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: UpperCAmelCase_ : Dict = full_content[1:].index("---" ) + 1 UpperCAmelCase_ : List[str] = "\n".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(A__ ) class UpperCamelCase_ (__A ): # class attributes __magic_name__ = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowerCAmelCase_ : Path ) -> "DatasetMetadata": with open(lowerCAmelCase_ , encoding="utf-8" ) as readme_file: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(lowerCAmelCase_ ) else: return cls() def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Path ) -> List[str]: if path.exists(): with open(lowerCAmelCase_ , encoding="utf-8" ) as 
readme_file: UpperCAmelCase_ : str = readme_file.read() else: UpperCAmelCase_ : str = None UpperCAmelCase_ : Optional[int] = self._to_readme(lowerCAmelCase_ ) with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as readme_file: readme_file.write(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[str] = None ) -> str: if readme_content is not None: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = _split_yaml_from_readme(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = "---\n" + self.to_yaml_string() + "---\n" + content else: UpperCAmelCase_ : Optional[int] = "---\n" + self.to_yaml_string() + "---\n" return full_content @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple , lowerCAmelCase_ : str ) -> "DatasetMetadata": UpperCAmelCase_ : Tuple = yaml.load(lowerCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields UpperCAmelCase_ : Dict = { (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return yaml.safe_dump( { (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=lowerCAmelCase_ , allow_unicode=lowerCAmelCase_ , encoding="utf-8" , ).decode("utf-8" ) lowerCamelCase_ = { '''image-classification''': [], '''translation''': [], '''image-segmentation''': [], '''fill-mask''': [], '''automatic-speech-recognition''': [], '''token-classification''': [], '''sentence-similarity''': [], '''audio-classification''': [], '''question-answering''': [], '''summarization''': [], '''zero-shot-classification''': [], '''table-to-text''': [], '''feature-extraction''': [], '''other''': [], '''multiple-choice''': [], '''text-classification''': [], '''text-to-image''': [], '''text2text-generation''': [], '''zero-shot-image-classification''': [], 
'''tabular-classification''': [], '''tabular-regression''': [], '''image-to-image''': [], '''tabular-to-text''': [], '''unconditional-image-generation''': [], '''text-retrieval''': [], '''text-to-speech''': [], '''object-detection''': [], '''audio-to-audio''': [], '''text-generation''': [], '''conversational''': [], '''table-question-answering''': [], '''visual-question-answering''': [], '''image-to-text''': [], '''reinforcement-learning''': [], '''voice-activity-detection''': [], '''time-series-forecasting''': [], '''document-question-answering''': [], } if __name__ == "__main__": from argparse import ArgumentParser lowerCamelCase_ = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''') ap.add_argument('''readme_filepath''') lowerCamelCase_ = ap.parse_args() lowerCamelCase_ = Path(args.readme_filepath) lowerCamelCase_ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
95
# Integration tests for the ``accelerate`` command-line interface: the first
# class launches the bundled test script via ``accelerate launch``; the second
# checks the gcloud command strings emitted by ``accelerate tpu-config``.
#
# NOTE(review): this block is a mechanically renamed copy of an upstream test
# module -- class attributes and locals were renamed to
# ``lowerCamelCase_``/``snake_case_`` while later reads keep the original
# names (``mod_file``, ``config_folder``, ``config_file``, ``cmd``,
# ``__magic_name__`` in subTest/assertIn, attribute names like
# ``self.base_cmd``/``self.command``). Restore the names before running.
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class __lowerCAmelCase ( unittest.TestCase ):
    # Locations of the packaged CLI test script and the user config file that
    # the tests temporarily move out of the way.
    lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
    lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
    lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
    lowerCamelCase_ : Tuple = '''default_config.yaml'''
    lowerCamelCase_ : str = config_folder / config_file
    lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
    lowerCamelCase_ : Dict = Path('''tests/test_configs''' )

    @classmethod
    def lowerCamelCase (cls ) -> Dict:
        '''Stash the user's default accelerate config so the tests run from a clean state.'''
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def lowerCamelCase (cls ) -> Any:
        '''Restore the user's default accelerate config after the tests.'''
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def lowerCamelCase (self ) -> Tuple:
        '''Launch the test script with no config file, adding --multi_gpu when >1 CUDA device is present.'''
        snake_case_ : Dict = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def lowerCamelCase (self ) -> Dict:
        '''Launch the test script once per bundled config file.'''
        for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
            with self.subTest(config_file=__magic_name__ ):
                execute_subprocess_async(
                    self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )

    def lowerCamelCase (self ) -> List[Any]:
        '''Smoke-test the ``accelerate test`` subcommand.'''
        execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )


class __lowerCAmelCase ( unittest.TestCase ):
    # Fixed TPU parameters and expected prefixes for the generated gcloud
    # command; each test runs ``accelerate tpu-config --debug`` and asserts on
    # the printed command string.
    lowerCamelCase_ : List[str] = '''test-tpu'''
    lowerCamelCase_ : Dict = '''us-central1-a'''
    lowerCamelCase_ : Any = '''ls'''
    lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
    lowerCamelCase_ : Tuple = '''cd /usr/share'''
    lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
    lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''

    def lowerCamelCase (self ) -> Dict:
        '''Single --command with explicit zone/name flags.'''
        snake_case_ : int = run_command(
            self.cmd
            + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> Dict:
        '''Old (0.12.0) config file plus explicit flags still produces the same command.'''
        snake_case_ : Optional[int] = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command''',
                self.command,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''Latest config file alone: commands come from the config.'''
        snake_case_ : List[str] = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''A --command flag overrides the commands from the config file.'''
        snake_case_ : List[Any] = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> int:
        '''Multiple --command flags are chained with semicolons.'''
        snake_case_ : Any = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--command''',
                self.command,
                '''--command''',
                '''echo "Hello World"''',
                '''--debug''',
            ] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> Dict:
        '''A --command_file supplies the commands to run.'''
        snake_case_ : str = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> int:
        '''--command_file combined with the old config file and explicit flags.'''
        snake_case_ : Tuple = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command_file''',
                self.command_file,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> Optional[int]:
        '''--install_accelerate prepends a pip install of the latest accelerate.'''
        snake_case_ : Any = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )

    def lowerCamelCase (self ) -> str:
        '''--accelerate_version pins the pip install to a specific version.'''
        snake_case_ : Optional[Any] = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--install_accelerate''',
                '''--accelerate_version''',
                '''12.0.0''',
                '''--debug''',
            ] , return_stdout=__magic_name__ , )
        self.assertIn(
            F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
60
0
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip __lowerCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def a ( __UpperCAmelCase : Tuple ) -> Union[str, Any]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict ) -> Dict: return max(metric_fn(__UpperCAmelCase , __UpperCAmelCase ) for gt in ground_truths ) def a ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] ) -> int: __magic_name__: Optional[Any] = [line.strip() for line in open(__UpperCAmelCase , """r""" ).readlines()] __magic_name__: Union[str, Any] = [] if args.gold_data_mode == "qa": __magic_name__: Union[str, Any] = pd.read_csv(__UpperCAmelCase , sep="""\t""" , header=__UpperCAmelCase ) for answer_list in data[1]: __magic_name__: List[Any] = ast.literal_eval(__UpperCAmelCase ) answers.append(__UpperCAmelCase ) else: __magic_name__: Any = [line.strip() for line in open(__UpperCAmelCase , """r""" ).readlines()] __magic_name__: List[Any] = [[reference] for reference in references] __magic_name__: Any = 0 for prediction, ground_truths in zip(__UpperCAmelCase , __UpperCAmelCase ): total += 1 em += metric_max_over_ground_truths(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) fa += metric_max_over_ground_truths(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) 
__magic_name__: Union[str, Any] = 1_00.0 * em / total __magic_name__: str = 1_00.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def a ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] ) -> Union[str, Any]: __magic_name__: List[str] = args.k __magic_name__: List[str] = [line.strip() for line in open(__UpperCAmelCase , """r""" ).readlines()] __magic_name__: int = [line.strip() for line in open(__UpperCAmelCase , """r""" ).readlines()] __magic_name__: int = 0 for hypo, reference in zip(__UpperCAmelCase , __UpperCAmelCase ): __magic_name__: List[Any] = set(hypo.split("""\t""" )[:k] ) __magic_name__: Dict = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __magic_name__: int = 1_00.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def a ( __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] ) -> Any: def strip_title(__UpperCAmelCase : Optional[int] ): if title.startswith("""\"""" ): __magic_name__: int = title[1:] if title.endswith("""\"""" ): __magic_name__: Tuple = title[:-1] return title __magic_name__: List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCAmelCase , return_tensors="""pt""" , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , )["""input_ids"""].to(args.device ) __magic_name__: Tuple = rag_model.rag.question_encoder(__UpperCAmelCase ) __magic_name__: List[Any] = question_enc_outputs[0] __magic_name__: Optional[int] = rag_model.retriever( __UpperCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) __magic_name__: Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __magic_name__: Any = [] for docs in all_docs: __magic_name__: Any = [strip_title(__UpperCAmelCase ) for title in docs["""title"""]] 
provenance_strings.append("""\t""".join(__UpperCAmelCase ) ) return provenance_strings def a ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> str: with torch.no_grad(): __magic_name__: str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCAmelCase , return_tensors="""pt""" , padding=__UpperCAmelCase , truncation=__UpperCAmelCase ) __magic_name__: int = inputs_dict.input_ids.to(args.device ) __magic_name__: Optional[int] = inputs_dict.attention_mask.to(args.device ) __magic_name__: Union[str, Any] = rag_model.generate( # rag_model overwrites generate __UpperCAmelCase , attention_mask=__UpperCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __magic_name__: Any = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) if args.print_predictions: for q, a in zip(__UpperCAmelCase , __UpperCAmelCase ): logger.info("""Q: {} - A: {}""".format(__UpperCAmelCase , __UpperCAmelCase ) ) return answers def a ( ) -> Dict: __magic_name__: Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__UpperCAmelCase , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__UpperCAmelCase , choices=["""exact""", """compressed""", """legacy"""] , type=__UpperCAmelCase , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__UpperCAmelCase , type=__UpperCAmelCase , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__UpperCAmelCase , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , 
default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__UpperCAmelCase , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__UpperCAmelCase , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__UpperCAmelCase , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__UpperCAmelCase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__UpperCAmelCase , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__UpperCAmelCase , 
help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__UpperCAmelCase , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=5_0 , type=__UpperCAmelCase , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) __magic_name__: List[str] = parser.parse_args() __magic_name__: Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def a ( __UpperCAmelCase : Optional[Any] ) -> Tuple: __magic_name__: Optional[Any] = {} if args.model_type is None: __magic_name__: Any = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): __magic_name__: Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration __magic_name__: List[Any] = args.n_docs if args.index_name is not None: __magic_name__: Optional[int] = args.index_name if args.index_path is not None: __magic_name__: Any = args.index_path else: __magic_name__: Any = BartForConditionalGeneration __magic_name__: Any = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __UpperCAmelCase ) __magic_name__: Union[str, Any] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k __magic_name__: Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: 
{}""".format(args.predictions_path ) ) score_fn(__UpperCAmelCase , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__UpperCAmelCase ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): __magic_name__: Optional[int] = RagRetriever.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) __magic_name__: Dict = model_class.from_pretrained(__UpperCAmelCase , retriever=__UpperCAmelCase , **__UpperCAmelCase ) model.retriever.init_retrieval() else: __magic_name__: Optional[Any] = model_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: __magic_name__: List[Any] = [] for line in tqdm(__UpperCAmelCase ): questions.append(line.strip() ) if len(__UpperCAmelCase ) == args.eval_batch_size: __magic_name__: Union[str, Any] = evaluate_batch_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) preds_file.write("""\n""".join(__UpperCAmelCase ) + """\n""" ) preds_file.flush() __magic_name__: List[str] = [] if len(__UpperCAmelCase ) > 0: __magic_name__: Dict = evaluate_batch_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) preds_file.write("""\n""".join(__UpperCAmelCase ) ) preds_file.flush() score_fn(__UpperCAmelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": __lowerCamelCase = get_args() main(args)
96
import warnings from ..trainer import Trainer from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict: '''simple docstring''' warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , __magic_name__ , ) super().__init__(args=__magic_name__ , **__magic_name__ )
60
0
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class lowercase__( unittest.TestCase ): """simple docstring""" a :List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING a :Tuple = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def _lowercase ( self : Tuple ) -> Dict: lowercase_ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowercase_ = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowercase_ = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. 
FiliFili@@''' ) } ], ] , ) lowercase_ = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE_ , num_return_sequences=2 , return_tensors=SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE_ )}, ] , ) lowercase_ = text_generator.model.config.eos_token_id lowercase_ = '''<pad>''' lowercase_ = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=SCREAMING_SNAKE_CASE_ , num_return_sequences=2 , batch_size=2 , return_tensors=SCREAMING_SNAKE_CASE_ , ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ [ {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE_ )}, ], ] , ) @require_tf def _lowercase ( self : str ) -> Optional[int]: lowercase_ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowercase_ = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowercase_ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, 
Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: lowercase_ = TextGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) return text_generator, ["This is a test", "Another test"] def _lowercase ( self : List[Any] ) -> Optional[int]: lowercase_ = '''Hello I believe in''' lowercase_ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowercase_ = text_generator(SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowercase_ = text_generator(SCREAMING_SNAKE_CASE_ , stop_sequence=''' fe''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'''generated_text''': '''Hello I believe in fe'''}] ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: lowercase_ = text_generator.model lowercase_ = text_generator.tokenizer lowercase_ = text_generator('''This is a test''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowercase_ = text_generator('''This is a test''' , return_full_text=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowercase_ = pipeline(task='''text-generation''' , model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , return_full_text=SCREAMING_SNAKE_CASE_ ) lowercase_ = text_generator('''This is a test''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowercase_ = text_generator('''This is a test''' , return_full_text=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'''generated_text''': 
ANY(SCREAMING_SNAKE_CASE_ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowercase_ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}], [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowercase_ = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=SCREAMING_SNAKE_CASE_ ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}], [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}], ] , ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): lowercase_ = text_generator('''test''' , return_full_text=SCREAMING_SNAKE_CASE_ , return_text=SCREAMING_SNAKE_CASE_ ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): lowercase_ = text_generator('''test''' , return_full_text=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): lowercase_ = text_generator('''test''' , return_text=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowercase_ = text_generator('''''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowercase_ = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowercase_ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0_0_0_0 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_0_0 , max_new_tokens=2_0 ) lowercase_ = text_generator('''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=2_0 ) # Hole strategy cannot work with self.assertRaises(SCREAMING_SNAKE_CASE_ ): text_generator( '''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 1_0 , ) @require_torch @require_accelerate @require_torch_gpu def _lowercase ( self : Union[str, Any] ) -> Tuple: import torch # Classic `model_kwargs` lowercase_ = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase_ = pipe('''This is a test''' ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ { 
'''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowercase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase_ = pipe('''This is a test''' ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowercase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowercase_ = pipe('''This is a test''' ) self.assertEqual( SCREAMING_SNAKE_CASE_ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def _lowercase ( self : Optional[int] ) -> List[Any]: import torch lowercase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def _lowercase ( self : int ) -> Dict: import torch lowercase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE_ , top_p=0.5 ) def _lowercase ( self : List[Any] ) -> int: lowercase_ = '''Hello world''' lowercase_ = pipeline('''text-generation''' , 
model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowercase_ = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowercase_ = logging.get_logger('''transformers.generation.utils''' ) lowercase_ = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl: lowercase_ = text_generator(SCREAMING_SNAKE_CASE_ , max_length=1_0 , max_new_tokens=1 ) self.assertIn(SCREAMING_SNAKE_CASE_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl: lowercase_ = text_generator(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 ) self.assertNotIn(SCREAMING_SNAKE_CASE_ , cl.out ) with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl: lowercase_ = text_generator(SCREAMING_SNAKE_CASE_ , max_length=1_0 ) self.assertNotIn(SCREAMING_SNAKE_CASE_ , cl.out )
97
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" snake_case_ : str = '''mock-s3-bucket''' snake_case_ : str = f'''s3://{mock_bucket}''' snake_case_ : Any = extract_path_from_uri(_UpperCamelCase ) assert dataset_path.startswith('''s3://''' ) is False snake_case_ : Optional[Any] = '''./local/path''' snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase ) assert dataset_path == new_dataset_path def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase ) assert is_remote is True snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' ) snake_case_ : int = is_remote_filesystem(_UpperCamelCase ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol] if input_path is None: snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif 
compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(_UpperCamelCase ) snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase ) assert isinstance(_UpperCamelCase , _UpperCamelCase ) snake_case_ : int = os.path.basename(_UpperCamelCase ) snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} snake_case_ : Any = compressed_file_paths[protocol] snake_case_ : Any = '''dataset.jsonl''' snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}''' snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase ) assert fs.isfile(_UpperCamelCase ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase ) snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(_UpperCamelCase ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def lowerCamelCase_ ( ) -> Any: """simple docstring""" snake_case_ : Tuple = '''bz2''' # Import module import 
datasets.filesystems # Overwrite protocol and reload register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase ) with pytest.warns(_UpperCamelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(_UpperCamelCase ) == 1 assert ( str(warning_info[0].message ) == f'''A filesystem protocol was already set for {protocol} and will be overwritten.''' )
60
0
'''simple docstring''' import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures lowercase__ : str = logging.get_logger(__name__) @dataclass class __lowerCAmelCase : """simple docstring""" _snake_case : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} ) _snake_case : str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) _snake_case : int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) } , ) _snake_case : bool = field( default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' _UpperCamelCase = self.task_name.lower() class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[Any] = 'train' _snake_case : Optional[int] = 'dev' _snake_case : List[Any] = 'test' class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : GlueDataTrainingArguments _snake_case : str _snake_case : List[InputFeatures] def __init__( self : int , lowerCAmelCase__ : GlueDataTrainingArguments , lowerCAmelCase__ : PreTrainedTokenizerBase , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Union[str, Split] = Split.train , lowerCAmelCase__ : Optional[str] = None , ) -> str: '''simple docstring''' warnings.warn( '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ''' '''library. 
You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , lowerCAmelCase__ , ) _UpperCamelCase = args _UpperCamelCase = glue_processors[args.task_name]() _UpperCamelCase = glue_output_modes[args.task_name] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): try: _UpperCamelCase = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) # Load data features from cache or dataset file _UpperCamelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) _UpperCamelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) _UpperCamelCase , _UpperCamelCase = label_list[2], label_list[1] _UpperCamelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_UpperCamelCase = cached_features_file + '''.lock''' with FileLock(lowerCAmelCase__ ): if os.path.exists(lowerCAmelCase__ ) and not args.overwrite_cache: _UpperCamelCase = time.time() _UpperCamelCase = torch.load(lowerCAmelCase__ ) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(f"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: _UpperCamelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: _UpperCamelCase = self.processor.get_test_examples(args.data_dir ) else: _UpperCamelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: _UpperCamelCase = examples[:limit_length] _UpperCamelCase = glue_convert_examples_to_features( lowerCAmelCase__ , lowerCAmelCase__ , max_length=args.max_seq_length , label_list=lowerCAmelCase__ , output_mode=self.output_mode , ) _UpperCamelCase = time.time() torch.save(self.features , lowerCAmelCase__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : Union[str, Any] ) -> Dict: '''simple docstring''' return len(self.features ) def __getitem__( self : Tuple , lowerCAmelCase__ : List[str] ) -> InputFeatures: '''simple docstring''' return self.features[i] def snake_case__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' return self.label_list
98
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Optional[Any] = '''encoder-decoder''' lowerCamelCase_ : Optional[Any] = True def __init__(self , **__magic_name__ ) -> Optional[int]: '''simple docstring''' super().__init__(**__magic_name__ ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case_ : Any = kwargs.pop('''encoder''' ) snake_case_ : Tuple = encoder_config.pop('''model_type''' ) snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' ) snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ ) snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ ) snake_case_ : Any = True @classmethod def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig: '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case_ : Tuple = True snake_case_ : Optional[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : str = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.encoder.to_dict() snake_case_ : Dict = self.decoder.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
60
0
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration SCREAMING_SNAKE_CASE = pytest.mark.integration SCREAMING_SNAKE_CASE = {'comet'} SCREAMING_SNAKE_CASE = importlib.util.find_spec('fairseq') is not None SCREAMING_SNAKE_CASE = {'code_eval'} SCREAMING_SNAKE_CASE = os.name == 'nt' SCREAMING_SNAKE_CASE = {'bertscore', 'frugalscore', 'perplexity'} SCREAMING_SNAKE_CASE = importlib.util.find_spec('transformers') is not None def a (lowerCAmelCase__ ): @wraps(lowerCAmelCase__ ) def wrapper(self , lowerCAmelCase__ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , lowerCAmelCase__ ) return wrapper def a (lowerCAmelCase__ ): @wraps(lowerCAmelCase__ ) def wrapper(self , lowerCAmelCase__ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , lowerCAmelCase__ ) return wrapper def a (lowerCAmelCase__ ): @wraps(lowerCAmelCase__ ) def wrapper(self , lowerCAmelCase__ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , lowerCAmelCase__ ) return wrapper def a (): __a = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __A , __A , __A ) @local class __UpperCAmelCase ( parameterized.TestCase ): """simple docstring""" _lowerCamelCase = {} _lowerCamelCase = None 
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" ) def snake_case_ ( self , __A ): __a = """[...]""" __a = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __A ) ).module_path ) __a = datasets.load.import_main_class(metric_module.__name__ , dataset=__A ) # check parameters __a = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__A , metric_module.__name__ ): with self.use_local_metrics(): try: __a = doctest.testmod(__A , verbose=__A , raise_on_error=__A ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def snake_case_ ( self , __A ): __a = """[...]""" __a = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __A ) ).module_path ) # run doctest with self.use_local_metrics(): __a = doctest.testmod(__A , verbose=__A , raise_on_error=__A ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def snake_case_ ( self , __A , __A ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__A ): yield else: yield @contextmanager def snake_case_ ( self ): def load_local_metric(__A , *__A , **__A ): return load_metric(os.path.join("""metrics""" , __A ) , *__A , **__A ) with patch("""datasets.load_metric""" ) as mock_load_metric: __a = load_local_metric yield @classmethod def snake_case_ ( cls , __A ): def wrapper(__A ): __a = contextmanager(__A ) __a = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def a (lowerCAmelCase__ ): import tensorflow.compat.va as tf from 
bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class __UpperCAmelCase ( __A ): """simple docstring""" def snake_case_ ( self , __A ): assert len(input_dict["""input_ids"""] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: __a = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def a (lowerCAmelCase__ ): import torch def bert_cos_score_idf(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCAmelCase__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: __a = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def a (lowerCAmelCase__ ): def load_from_checkpoint(lowerCAmelCase__ ): class __UpperCAmelCase : """simple docstring""" def snake_case_ ( self , __A , *__A , **__A ): assert len(__A ) == 2 __a = [0.19, 0.92] return scores, sum(__A ) / len(__A ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: __a = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: __a = load_from_checkpoint yield def a (): __a = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) __a = """ERROR""" __a = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(lowerCAmelCase__ , match=re.escape(lowerCAmelCase__ ) ): metric.compute(predictions=[] , 
references=[] , scheme=lowerCAmelCase__ )
99
# Obfuscated copy of transformers' RagTokenizer module: a wrapper that pairs a
# question-encoder tokenizer with a generator tokenizer.
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig

# Module-level logger (obfuscated name; never read below).
lowerCAmelCase_ = logging.get_logger(__name__)


class __lowerCAmelCase:
    """Composite RAG tokenizer: holds a question-encoder tokenizer and a
    generator tokenizer and dispatches calls to ``self.current_tokenizer``.

    NOTE(review): this block is machine-obfuscated and not runnable as-is —
    several ``def`` headers repeat the parameter name ``__magic_name__``
    (SyntaxError: duplicate argument), bodies read names the obfuscation
    renamed away (``question_encoder``, ``generator``, ``config``, ``kwargs``,
    ``max_length``, ``tgt_texts``, ``model_inputs``, ``labels``), every method
    shares the name ``lowerCamelCase`` so later defs shadow earlier ones, and
    the return annotations ``Dict``/``Tuple``/``Any``/``Union`` are never
    imported.  Documentation below describes the evident intent; code tokens
    are unchanged.
    """

    def __init__(self, __magic_name__, __magic_name__) -> List[Any]:
        """Store the two sub-tokenizers; the question encoder starts as current."""
        snake_case_: Optional[int] = question_encoder
        snake_case_: Optional[int] = generator
        snake_case_: Optional[Any] = self.question_encoder

    def lowerCamelCase(self, __magic_name__) -> Dict:
        """Save both sub-tokenizers into subfolders of a target directory."""
        if os.path.isfile(__magic_name__):
            # NOTE(review): ``save_directory`` inside the message is undefined here.
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(__magic_name__, exist_ok=__magic_name__)
        snake_case_: str = os.path.join(__magic_name__, '''question_encoder_tokenizer''')
        snake_case_: List[Any] = os.path.join(__magic_name__, '''generator_tokenizer''')
        self.question_encoder.save_pretrained(__magic_name__)
        self.generator.save_pretrained(__magic_name__)

    @classmethod
    def lowerCamelCase(cls, __magic_name__, **__magic_name__) -> Any:
        """Build the pair from a pretrained checkpoint via AutoTokenizer."""
        # Imported lazily — presumably to avoid a circular import with the
        # auto-tokenizer mappings (TODO confirm).
        from ..auto.tokenization_auto import AutoTokenizer

        snake_case_: List[str] = kwargs.pop('''config''', __magic_name__)
        if config is None:
            snake_case_: int = RagConfig.from_pretrained(__magic_name__)
        snake_case_: Dict = AutoTokenizer.from_pretrained(
            __magic_name__, config=config.question_encoder, subfolder='''question_encoder_tokenizer'''
        )
        snake_case_: Dict = AutoTokenizer.from_pretrained(
            __magic_name__, config=config.generator, subfolder='''generator_tokenizer'''
        )
        return cls(question_encoder=__magic_name__, generator=__magic_name__)

    def __call__(self, *__magic_name__, **__magic_name__) -> Tuple:
        """Delegate tokenization to whichever sub-tokenizer is current."""
        return self.current_tokenizer(*__magic_name__, **__magic_name__)

    def lowerCamelCase(self, *__magic_name__, **__magic_name__) -> Dict:
        """Delegate batch decoding to the generator tokenizer."""
        return self.generator.batch_decode(*__magic_name__, **__magic_name__)

    def lowerCamelCase(self, *__magic_name__, **__magic_name__) -> int:
        """Delegate single-sequence decoding to the generator tokenizer."""
        return self.generator.decode(*__magic_name__, **__magic_name__)

    def lowerCamelCase(self) -> Union[str, Any]:
        """Switch the current tokenizer to the question encoder."""
        snake_case_: Any = self.question_encoder

    def lowerCamelCase(self) -> Dict:
        """Switch the current tokenizer to the generator."""
        snake_case_: Dict = self.generator

    def lowerCamelCase(
        self,
        __magic_name__,
        __magic_name__=None,
        __magic_name__=None,
        __magic_name__=None,
        __magic_name__="longest",
        __magic_name__=None,
        __magic_name__=True,
        **__magic_name__,
    ) -> BatchEncoding:
        """Deprecated seq2seq batch preparation: tokenize sources, then targets."""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''',
            __magic_name__,
        )
        if max_length is None:
            snake_case_: Dict = self.current_tokenizer.model_max_length
        snake_case_: List[str] = self(
            __magic_name__,
            add_special_tokens=__magic_name__,
            return_tensors=__magic_name__,
            max_length=__magic_name__,
            padding=__magic_name__,
            truncation=__magic_name__,
            **__magic_name__,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            snake_case_: Optional[int] = self.current_tokenizer.model_max_length
        snake_case_: Union[str, Any] = self(
            text_target=__magic_name__,
            add_special_tokens=__magic_name__,
            return_tensors=__magic_name__,
            padding=__magic_name__,
            max_length=__magic_name__,
            truncation=__magic_name__,
            **__magic_name__,
        )
        snake_case_: str = labels['''input_ids''']
        return model_inputs
60
0
# Obfuscated copy of the Speech2Text (Kaldi fbank) feature extractor.
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

# Module-level logger.  NOTE(review): bodies below call ``logger`` — after
# obfuscation that name no longer exists; only ``_A`` does.
_A: Any = logging.get_logger(__name__)


class __snake_case(__SCREAMING_SNAKE_CASE):
    """Speech feature extractor computing Kaldi-compatible filter banks with
    optional utterance-level cepstral mean/variance normalization (CMVN).

    NOTE(review): machine-obfuscated and not runnable as-is — the base class
    ``__SCREAMING_SNAKE_CASE`` is undefined (presumably the imported
    ``SequenceFeatureExtractor``; confirm), several ``def`` headers repeat the
    parameter ``A_`` (SyntaxError), every method is named ``lowercase_`` so
    later defs shadow earlier ones, assignment targets are renamed while uses
    keep original names, and ``np.floataa``/``np.intaa`` are not real numpy
    dtypes (presumably float32/int32).  Code tokens are unchanged.
    """

    lowerCamelCase__: Any = ["""input_features""", """attention_mask"""]

    def __init__(self, A_=80, A_=1_60_00, A_=80, A_=0.0, A_=True, A_=True, A_=True, **A_, ):
        """Configure mel-bin count, sampling rate and the CMVN switches."""
        super().__init__(feature_size=A_, sampling_rate=A_, padding_value=A_, **A_)
        SCREAMING_SNAKE_CASE__ = num_mel_bins
        SCREAMING_SNAKE_CASE__ = do_ceptral_normalize
        SCREAMING_SNAKE_CASE__ = normalize_means
        SCREAMING_SNAKE_CASE__ = normalize_vars
        SCREAMING_SNAKE_CASE__ = True

    def lowercase_(self, A_, ):
        """Extract Kaldi fbank features for one mono waveform (numpy in/out)."""
        SCREAMING_SNAKE_CASE__ = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        SCREAMING_SNAKE_CASE__ = torch.from_numpy(A_).unsqueeze(0)
        SCREAMING_SNAKE_CASE__ = ta_kaldi.fbank(A_, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def lowercase_(A_, A_, A_=True, A_=True, A_=0.0, ):
        """Per-utterance CMVN over the first ``input_length`` frames; frames
        past that length are overwritten with the padding value; returns the
        array cast via ``np.floataa`` (presumably float32 — confirm)."""
        if normalize_means:
            SCREAMING_SNAKE_CASE__ = x[:input_length].mean(axis=0)
            SCREAMING_SNAKE_CASE__ = np.subtract(A_, A_)
        if normalize_vars:
            SCREAMING_SNAKE_CASE__ = x[:input_length].std(axis=0)
            SCREAMING_SNAKE_CASE__ = np.divide(A_, A_)
        if input_length < x.shape[0]:
            SCREAMING_SNAKE_CASE__ = padding_value
        # make sure array is in float32
        SCREAMING_SNAKE_CASE__ = x.astype(np.floataa)
        return x

    def lowercase_(self, A_, A_=None):
        """Apply CMVN to each feature matrix, deriving each utterance's true
        length from the attention mask when one is given."""
        SCREAMING_SNAKE_CASE__ = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(A_, A_, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(A_, A_)
        ]

    def __call__(self, A_, A_=False, A_=None, A_=False, A_=None, A_=None, A_=None, A_=None, **A_, ):
        """Featurize raw speech: validate the sampling rate, normalize to a
        batch, extract fbanks, pad, optionally CMVN-normalize, and convert to
        the requested tensor type."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.'''
            )
        SCREAMING_SNAKE_CASE__ = isinstance(A_, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        SCREAMING_SNAKE_CASE__ = is_batched_numpy or (
            isinstance(A_, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE__ = [np.asarray(A_, dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(A_, np.ndarray):
            SCREAMING_SNAKE_CASE__ = np.asarray(A_, dtype=np.floataa)
        elif isinstance(A_, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE__ = [raw_speech]
        # extract fbank features
        SCREAMING_SNAKE_CASE__ = [self._extract_fbank_features(A_) for waveform in raw_speech]
        # convert into correct format for padding
        SCREAMING_SNAKE_CASE__ = BatchFeature({'''input_features''': features})
        SCREAMING_SNAKE_CASE__ = self.pad(
            A_,
            padding=A_,
            max_length=A_,
            truncation=A_,
            pad_to_multiple_of=A_,
            return_attention_mask=A_,
            **A_,
        )
        # make sure list is in array format
        SCREAMING_SNAKE_CASE__ = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], A_):
            SCREAMING_SNAKE_CASE__ = [np.asarray(A_, dtype=np.floataa) for feature in input_features]
        SCREAMING_SNAKE_CASE__ = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            SCREAMING_SNAKE_CASE__ = [np.asarray(A_, dtype=np.intaa) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            SCREAMING_SNAKE_CASE__ = (
                np.array(A_, dtype=np.intaa)
                if self._get_padding_strategies(A_, max_length=A_) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            SCREAMING_SNAKE_CASE__ = self.normalize(
                padded_inputs['''input_features'''], attention_mask=A_
            )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE__ = padded_inputs.convert_to_tensors(A_)
        return padded_inputs
100
# Obfuscated copy of the ViT-MSN model test module (tester, common tests, and
# a slow integration test).
#
# NOTE(review): machine-obfuscated and not runnable as-is — ``def`` headers
# repeat ``__magic_name__`` (SyntaxError), annotated tuple unpacking
# (``a, b, c : T = x``) is invalid syntax, assignment targets are renamed
# while uses keep the original names (``result``, ``config``, ``model``,
# ``ViTMSNModelTester`` ... are unbound), methods share the name
# ``lowerCamelCase`` so later defs shadow earlier ones, and the two test
# classes share the name ``__lowerCAmelCase``.  Code tokens are unchanged;
# comments describe the evident intent.
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class __lowerCAmelCase:
    """Model tester: builds small ViT-MSN configs/inputs and checks outputs."""

    def __init__(
        self,
        __magic_name__,
        __magic_name__=13,
        __magic_name__=30,
        __magic_name__=2,
        __magic_name__=3,
        __magic_name__=True,
        __magic_name__=True,
        __magic_name__=32,
        __magic_name__=5,
        __magic_name__=4,
        __magic_name__=37,
        __magic_name__="gelu",
        __magic_name__=0.1,
        __magic_name__=0.1,
        __magic_name__=10,
        __magic_name__=0.02,
        __magic_name__=None,
    ) -> List[Any]:
        """Record the (tiny) model hyperparameters used by the tests."""
        snake_case_: List[str] = parent
        snake_case_: Optional[Any] = batch_size
        snake_case_: List[Any] = image_size
        snake_case_: Optional[int] = patch_size
        snake_case_: Optional[Any] = num_channels
        snake_case_: Optional[Any] = is_training
        snake_case_: List[Any] = use_labels
        snake_case_: Optional[int] = hidden_size
        snake_case_: Optional[Any] = num_hidden_layers
        snake_case_: Union[str, Any] = num_attention_heads
        snake_case_: Optional[Any] = intermediate_size
        snake_case_: Any = hidden_act
        snake_case_: List[str] = hidden_dropout_prob
        snake_case_: Dict = attention_probs_dropout_prob
        snake_case_: List[str] = type_sequence_label_size
        snake_case_: Union[str, Any] = initializer_range
        snake_case_: List[Any] = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case_: Any = (image_size // patch_size) ** 2
        snake_case_: int = num_patches + 1

    def lowerCamelCase(self) -> Optional[Any]:
        """Build random pixel values (and labels, if enabled) plus a config."""
        snake_case_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        snake_case_: List[Any] = None
        if self.use_labels:
            snake_case_: Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
        snake_case_: int = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase(self) -> Tuple:
        """Return a ViTMSNConfig built from the stored hyperparameters."""
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def lowerCamelCase(self, __magic_name__, __magic_name__, __magic_name__) -> List[str]:
        """Check the base model's last_hidden_state shape."""
        snake_case_: int = ViTMSNModel(config=__magic_name__)
        model.to(__magic_name__)
        model.eval()
        snake_case_: List[str] = model(__magic_name__)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def lowerCamelCase(self, __magic_name__, __magic_name__, __magic_name__) -> List[str]:
        """Check the classification head's logits shape (RGB and greyscale)."""
        snake_case_: int = self.type_sequence_label_size
        snake_case_: Tuple = ViTMSNForImageClassification(__magic_name__)
        model.to(__magic_name__)
        model.eval()
        snake_case_: Any = model(__magic_name__, labels=__magic_name__)
        # NOTE(review): missing f-prefix — both lines print the braces literally.
        print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''')
        print('''Labels: {labels}''')
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        snake_case_: Optional[int] = 1
        snake_case_: List[str] = ViTMSNForImageClassification(__magic_name__)
        model.to(__magic_name__)
        model.eval()
        snake_case_: int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        snake_case_: Any = model(__magic_name__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def lowerCamelCase(self) -> Dict:
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        snake_case_: Any = self.prepare_config_and_inputs()
        snake_case_, snake_case_, snake_case_: Optional[int] = config_and_inputs
        snake_case_: Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class __lowerCAmelCase(_a, _a, unittest.TestCase):
    """Common model tests for ViT-MSN.

    NOTE(review): the bases ``_a, _a`` are undefined (presumably the imported
    ModelTesterMixin/PipelineTesterMixin; confirm), and the class attributes
    below all share the name ``lowerCamelCase_`` so later ones win.
    """

    lowerCamelCase_: List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    lowerCamelCase_: Optional[int] = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCamelCase_: int = False
    lowerCamelCase_: Optional[int] = False
    lowerCamelCase_: int = False
    lowerCamelCase_: Optional[int] = False

    def lowerCamelCase(self) -> str:
        """Set up the model tester and the config tester."""
        snake_case_: List[Any] = ViTMSNModelTester(self)
        snake_case_: Optional[Any] = ConfigTester(self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37)

    def lowerCamelCase(self) -> Union[str, Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMSN does not use inputs_embeds''')
    def lowerCamelCase(self) -> Optional[Any]:
        pass

    def lowerCamelCase(self) -> int:
        """Check input/output embedding types for every model class."""
        snake_case_, snake_case_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_: Any = model_class(__magic_name__)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            snake_case_: Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__magic_name__, nn.Linear))

    def lowerCamelCase(self) -> int:
        """Check that forward's first argument is ``pixel_values``."""
        snake_case_, snake_case_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_: Tuple = model_class(__magic_name__)
            snake_case_: List[Any] = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_: Optional[int] = [*signature.parameters.keys()]
            snake_case_: List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], __magic_name__)

    def lowerCamelCase(self) -> Dict:
        """Exercise the base-model shape check."""
        snake_case_: Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__)

    def lowerCamelCase(self) -> List[str]:
        """Exercise the classification-head shape check."""
        snake_case_: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__magic_name__)

    @slow
    def lowerCamelCase(self) -> Any:
        """Load the first published checkpoint and assert it exists."""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_: str = ViTMSNModel.from_pretrained(__magic_name__)
            self.assertIsNotNone(__magic_name__)


def lowerCamelCase_() -> Optional[Any]:
    """Load the COCO cats fixture image used by the integration test."""
    snake_case_: int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
class __lowerCAmelCase(unittest.TestCase):
    """Slow integration test against the facebook/vit-msn-small checkpoint."""

    @cached_property
    def lowerCamelCase(self) -> Union[str, Any]:
        """Image processor for the checkpoint (None without vision deps)."""
        return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''') if is_vision_available() else None

    @slow
    def lowerCamelCase(self) -> Any:
        """Forward a fixture image and compare logits shape and a 3-slice."""
        torch.manual_seed(2)
        snake_case_: List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''').to(__magic_name__)
        snake_case_: str = self.default_image_processor
        snake_case_: str = prepare_img()
        snake_case_: int = image_processor(images=__magic_name__, return_tensors='''pt''').to(__magic_name__)
        # forward pass
        with torch.no_grad():
            snake_case_: Optional[int] = model(**__magic_name__)
        # verify the logits
        snake_case_: Optional[int] = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, __magic_name__)
        snake_case_: List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(__magic_name__)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], __magic_name__, atol=1e-4))
60
0
# Obfuscated copy of the Salesforce CTRL BPE tokenizer module.
#
# NOTE(review): machine-obfuscated and not runnable as-is — all four module
# constants share the name ``lowerCAmelCase__`` (each shadows the previous),
# the class later reads ``VOCAB_FILES_NAMES``/``PRETRAINED_VOCAB_FILES_MAP``/
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``/``CONTROL_CODES`` which no longer
# exist, ``def`` headers repeat parameter names (SyntaxError), and assignment
# targets are renamed while uses keep the original names.  Code tokens are
# unchanged; comments describe the evident intent.
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

lowerCAmelCase__: Union[str, Any] = logging.get_logger(__name__)

# Vocabulary file names expected on disk.
lowerCAmelCase__: List[str] = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

# Download URLs for the pretrained ctrl vocabulary/merges.
lowerCAmelCase__: List[Any] = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}

# Max positional-embedding size per checkpoint.
lowerCAmelCase__: List[str] = {
    'ctrl': 2_56,
}

# CTRL control codes: prompt-prefix token ids per domain.
lowerCAmelCase__: Optional[int] = {
    'Pregnancy': 16_86_29,
    'Christianity': 76_75,
    'Explain': 10_64_23,
    'Fitness': 6_34_40,
    'Saving': 6_31_63,
    'Ask': 2_71_71,
    'Ass': 9_59_85,
    'Joke': 16_35_09,
    'Questions': 4_56_22,
    'Thoughts': 4_96_05,
    'Retail': 5_23_42,
    'Feminism': 16_43_38,
    'Writing': 1_19_92,
    'Atheism': 19_22_63,
    'Netflix': 4_86_16,
    'Computing': 3_96_39,
    'Opinion': 4_32_13,
    'Alone': 4_49_67,
    'Funny': 5_89_17,
    'Gaming': 4_03_58,
    'Human': 40_88,
    'India': 13_31,
    'Joker': 7_71_38,
    'Diet': 3_62_06,
    'Legal': 1_18_59,
    'Norman': 49_39,
    'Tip': 7_26_89,
    'Weight': 5_23_43,
    'Movies': 4_62_73,
    'Running': 2_34_25,
    'Science': 20_90,
    'Horror': 3_77_93,
    'Confession': 6_05_72,
    'Finance': 1_22_50,
    'Politics': 1_63_60,
    'Scary': 19_19_85,
    'Support': 1_26_54,
    'Technologies': 3_25_16,
    'Teenage': 6_61_60,
    'Event': 3_27_69,
    'Learned': 6_74_60,
    'Notion': 18_27_70,
    'Wikipedia': 3_75_83,
    'Books': 66_65,
    'Extract': 7_60_50,
    'Confessions': 10_27_01,
    'Conspiracy': 7_59_32,
    'Links': 6_36_74,
    'Narcissus': 15_04_25,
    'Relationship': 5_47_66,
    'Relationships': 13_47_96,
    'Reviews': 4_16_71,
    'News': 42_56,
    'Translation': 2_68_20,
    'multilingual': 12_84_06,
}


def a__(A__):
    """Return the set of adjacent symbol pairs in a word (obfuscated get_pairs).

    NOTE(review): ``word``/``pairs``/``prev_char`` are unbound after obfuscation.
    """
    SCREAMING_SNAKE_CASE_: Optional[int] = set()
    SCREAMING_SNAKE_CASE_: str = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        SCREAMING_SNAKE_CASE_: Union[str, Any] = char
    SCREAMING_SNAKE_CASE_: Tuple = set(A__)
    return pairs


class __lowercase(__SCREAMING_SNAKE_CASE):
    """CTRL BPE tokenizer (word-level regex split, then BPE with '@@ ' joints).

    NOTE(review): the base ``__SCREAMING_SNAKE_CASE`` is undefined (presumably
    the imported ``PreTrainedTokenizer``; confirm), the four class attributes
    all share the name ``_UpperCAmelCase`` (later ones win), and every public
    method is named ``UpperCamelCase__`` so later defs shadow earlier ones.
    """

    _UpperCAmelCase = VOCAB_FILES_NAMES
    _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase = CONTROL_CODES

    def __init__(self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__="<unk>", **lowerCAmelCase__):
        """Load the vocab json and BPE merges; build encoder/decoder/ranks."""
        super().__init__(unk_token=lowerCAmelCase__, **lowerCAmelCase__)
        with open(lowerCAmelCase__, encoding='utf-8') as vocab_handle:
            SCREAMING_SNAKE_CASE_: Optional[Any] = json.load(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Any = {v: k for k, v in self.encoder.items()}
        with open(lowerCAmelCase__, encoding='utf-8') as merges_handle:
            SCREAMING_SNAKE_CASE_: Optional[Any] = merges_handle.read().split('\n')[1:-1]
        SCREAMING_SNAKE_CASE_: Tuple = [tuple(merge.split()) for merge in merges]
        SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
        SCREAMING_SNAKE_CASE_: Dict = {}

    @property
    def UpperCamelCase__(self):
        """Vocabulary size (number of entries in the encoder)."""
        return len(self.encoder)

    def UpperCamelCase__(self):
        """Full vocabulary including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def UpperCamelCase__(self, lowerCAmelCase__):
        """BPE-encode one word, caching results; sub-words joined with '@@ '."""
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE_: List[Any] = tuple(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked bigram first; stop when none is mergeable.
            SCREAMING_SNAKE_CASE_: List[Any] = min(lowerCAmelCase__, key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: List[str] = bigram
            SCREAMING_SNAKE_CASE_: List[str] = []
            SCREAMING_SNAKE_CASE_: Optional[Any] = 0
            while i < len(lowerCAmelCase__):
                try:
                    SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__, lowerCAmelCase__)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    SCREAMING_SNAKE_CASE_: Union[str, Any] = j
                if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            SCREAMING_SNAKE_CASE_: Dict = tuple(lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: Optional[int] = new_word
            if len(lowerCAmelCase__) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE_: Tuple = get_pairs(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = '@@ '.join(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = word[:-4]
        SCREAMING_SNAKE_CASE_: Tuple = word
        return word

    def UpperCamelCase__(self, lowerCAmelCase__):
        """Tokenize text: split on non-space runs, then BPE each word."""
        SCREAMING_SNAKE_CASE_: str = []
        SCREAMING_SNAKE_CASE_: Union[str, Any] = re.findall(r'\S+\n?', lowerCAmelCase__)
        for token in words:
            split_tokens.extend(list(self.bpe(lowerCAmelCase__).split(' ')))
        return split_tokens

    def UpperCamelCase__(self, lowerCAmelCase__):
        """Token string -> id, falling back to the unknown token's id."""
        return self.encoder.get(lowerCAmelCase__, self.encoder.get(self.unk_token))

    def UpperCamelCase__(self, lowerCAmelCase__):
        """Id -> token string, falling back to the unknown token."""
        return self.decoder.get(lowerCAmelCase__, self.unk_token)

    def UpperCamelCase__(self, lowerCAmelCase__):
        """Join tokens back into text, removing the '@@ ' BPE joints."""
        SCREAMING_SNAKE_CASE_: int = ' '.join(lowerCAmelCase__).replace('@@ ', '').strip()
        return out_string

    def UpperCamelCase__(self, lowerCAmelCase__, lowerCAmelCase__=None):
        """Write vocab.json and merges.txt into a directory; return the paths."""
        if not os.path.isdir(lowerCAmelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        SCREAMING_SNAKE_CASE_: int = os.path.join(
            lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        SCREAMING_SNAKE_CASE_: Any = os.path.join(
            lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )
        with open(lowerCAmelCase__, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase__, ensure_ascii=lowerCAmelCase__) + '\n')
        SCREAMING_SNAKE_CASE_: Dict = 0
        with open(lowerCAmelCase__, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase__: kv[1]):
                if index != token_index:
                    # Merge ranks should be consecutive in the file on disk.
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    SCREAMING_SNAKE_CASE_: Dict = token_index
                writer.write(' '.join(lowerCAmelCase__) + '\n')
                index += 1
        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
101
# Obfuscated copy of the EfficientNet configuration module, repaired so the
# config class is actually constructible.
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# NOTE(review): the obfuscated original bound both the logger and the archive
# map to the same module name; kept as-is so any external reference to
# ``lowerCAmelCase_`` still resolves to the same object it did before (the dict).
lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for an EfficientNet model (defaults match efficientnet-b7).

    Fixes over the obfuscated original: ``__init__`` repeated the parameter
    name ``__magic_name__`` ~21 times (SyntaxError) and assigned to throwaway
    locals while reading names that no longer existed; the base class ``_a``
    was undefined though ``PretrainedConfig`` is imported.  The parameter list
    below is restored from the right-hand-side names and the default-value
    list, in their original order, and every value is stored on ``self``.
    """

    # ``model_type`` slot in upstream configs; the obfuscated attribute name
    # is kept so external lookups of ``lowerCamelCase_`` still work.
    lowerCamelCase_ = '''efficientnet'''

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        """Store all architecture hyperparameters; extra kwargs go to the base.

        NOTE: the list defaults are only read, never mutated, so the shared
        mutable-default objects are safe here (mirrors upstream HF configs).
        """
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands to 4 hidden layers in the EfficientNet stack.
        self.num_hidden_layers = sum(num_block_repeats) * 4


class __lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for EfficientNet.

    NOTE(review): both classes share the obfuscated name ``__lowerCAmelCase``
    (this one shadows the config above) and both properties share the name
    ``lowerCamelCase`` (the second shadows the first); left unchanged because
    renaming them would alter the module's external interface.  The base
    ``_a`` of the original was undefined; the imported ``OnnxConfig`` is the
    evident intent.  Invalid annotations referencing unimported ``Any``/
    ``Union`` were dropped.
    """

    # Minimum torch version for a valid ONNX export (obfuscated attr name kept).
    lowerCamelCase_ = version.parse('''1.11''')

    @property
    def lowerCamelCase(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: NCHW pixel_values with symbolic axis names."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def lowerCamelCase(self) -> float:
        """Absolute tolerance used when validating the ONNX export."""
        return 1e-5
60
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class lowercase__ ( unittest.TestCase ): """simple docstring""" @slow def _a ( self ): '''simple docstring''' UpperCamelCase : List[str] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) UpperCamelCase : Dict = { """input_ids""": tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } UpperCamelCase : Tuple = model(_A )["""last_hidden_state"""] UpperCamelCase : Union[str, Any] = tf.TensorShape((1, 6, 7_6_8) ) self.assertEqual(output.shape , _A ) # compare the actual values for a slice. UpperCamelCase : Any = tf.convert_to_tensor( [ [ [0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04], [-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44], [-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
102
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowerCAmelCase_ = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int) lowerCAmelCase_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowerCAmelCase_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowerCAmelCase_ = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase_ = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
0
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case = 1_2_8_0_2_2 snake_case = 1_2_8_0_2_8 @require_sentencepiece class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : Optional[Any] = MaMaaaTokenizer A__ : List[Any] = False A__ : Any = False A__ : str = True def __UpperCAmelCase ( self : str ): """simple docstring""" super().setUp() _snake_case = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] _snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) _snake_case = Path(self.tmpdirname ) save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) _snake_case = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : Optional[int] , **__lowerCamelCase : Union[str, Any] ): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] ): """simple docstring""" return ( "This is a test", "This is a test", ) def __UpperCAmelCase ( self : int ): 
"""simple docstring""" _snake_case = '''</s>''' _snake_case = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" _snake_case = self.get_tokenizer() _snake_case = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(__lowerCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" pass def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = self.get_tokenizer() _snake_case = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2, 3, 4, 5, 6] , ) _snake_case = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(__lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) _snake_case = tokenizer.convert_tokens_to_string(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , '''This is a test''' ) @slow def __UpperCAmelCase ( self : Tuple ): """simple docstring""" # fmt: off _snake_case = {'''input_ids''': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 
5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): A__ : Tuple = '''facebook/m2m100_418M''' A__ : str = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] A__ : Union[str, Any] = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off A__ : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __UpperCAmelCase ( cls : Optional[int] ): """simple docstring""" _snake_case = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' ) _snake_case = 1 return cls def __UpperCAmelCase ( self : str ): """simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 1_2_8_0_6_3 ) def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" _snake_case = self.tokenizer.get_vocab() self.assertEqual(len(__lowerCamelCase ) , 
self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = '''en''' _snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids ) # fmt: off _snake_case = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on _snake_case = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) _snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase ) def __UpperCAmelCase ( self : str ): """simple docstring""" _snake_case = tempfile.mkdtemp() _snake_case = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(__lowerCamelCase ) _snake_case = MaMaaaTokenizer.from_pretrained(__lowerCamelCase ) self.assertDictEqual(new_tok.lang_token_to_id , __lowerCamelCase ) @require_torch def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case = '''en''' _snake_case = '''fr''' _snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors='''pt''' ) _snake_case = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _snake_case = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 
assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _snake_case = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _snake_case = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(__lowerCamelCase ) , { # en_XX, A, test, EOS '''input_ids''': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 1_2_8_0_0_6, } , )
103
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on HF ``tokenizers``.

    Normalization: NMT + NFKC, whitespace collapsing, lowercasing.
    Pre-tokenization: Metaspace (``replacement`` marker), per-digit splitting,
    punctuation splitting. Post-processing appends the EOS token.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        # Fixed id layout: pad=0, eos=1, unk=2.
        self.special_tokens = {
            '''pad''': {'''id''': 0, '''token''': pad_token},
            '''eos''': {'''id''': 1, '''token''': eos_token},
            '''unk''': {'''id''': 2, '''token''': unk_token},
        }

        # Flat list ordered by id, handed to the trainer later.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['''token'''] if False else token_dict['''id''']] = token_dict['''token''']

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(''' {2,}'''), ''' '''),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''',
            special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])],
        )

        parameters = {
            '''model''': '''SentencePieceUnigram''',
            '''replacement''': replacement,
            '''add_prefix_space''': add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on one file or a list of files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained model's serialized JSON so <unk> has its fixed id."""
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json['''model''']['''unk_id'''] = self.special_tokens['''unk''']['''id''']

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
60
0
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=[1, 16, 4, 4] , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]: A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an 
output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A__ = (self.image_size // 32) ** 2 A__ = num_patches + 1 def snake_case__ ( self ) -> List[Any]: A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self ) -> int: A__ = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=SCREAMING_SNAKE_CASE__ , ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: A__ = ViTHybridModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() A__ = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple: A__ = self.type_sequence_label_size A__ = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() A__ = model(SCREAMING_SNAKE_CASE__ , 
labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case__ ( self ) -> Any: A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): """simple docstring""" A__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ : str = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) A__ : Union[str, Any] = False A__ : List[str] = False A__ : Tuple = False def snake_case__ ( self ) -> Any: A__ = ViTHybridModelTester(self ) A__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def snake_case__ ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def snake_case__ ( self ) -> Union[str, Any]: pass def snake_case__ ( self ) -> int: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) ) def snake_case__ ( self ) -> List[Any]: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(SCREAMING_SNAKE_CASE__ ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self ) -> Dict: A__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self ) -> Optional[Any]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self ) -> Any: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(SCREAMING_SNAKE_CASE__ ) for model_class in self.all_model_classes: A__ = model_class(config=SCREAMING_SNAKE_CASE__ ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A__ = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def snake_case__ ( self ) -> Tuple: for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( ) -> Any: """simple docstring""" A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self ) -> str: return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case__ ( self ) -> Optional[Any]: A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( SCREAMING_SNAKE_CASE__ ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ ) # 
forward pass with torch.no_grad(): A__ = model(**SCREAMING_SNAKE_CASE__ ) # verify the logits A__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ ) A__ = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) @slow @require_accelerate def snake_case__ ( self ) -> Tuple: A__ = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" ) A__ = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" ) A__ = prepare_img() A__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" ) A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits # model predicts one of the 1000 ImageNet classes A__ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
104
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : List[Any] = [False] * len(_UpperCamelCase ) snake_case_ : int = [-1] * len(_UpperCamelCase ) def dfs(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Dict = True snake_case_ : Dict = c for u in graph[v]: if not visited[u]: dfs(_UpperCamelCase , 1 - c ) for i in range(len(_UpperCamelCase ) ): if not visited[i]: dfs(_UpperCamelCase , 0 ) for i in range(len(_UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
60
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''andreasmadsen/efficient_mlm_m0.40''': (
        '''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    """Configuration for a RoBERTa-PreLayerNorm model.

    Defaults correspond to the ``andreasmadsen/efficient_mlm_m0.40``
    architecture referenced in the archive map above.
    """

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa-PreLayerNorm."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout; multiple-choice tasks add a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
105
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int: '''simple docstring''' snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20} snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} snake_case_ : str = parent snake_case_ : Optional[int] = batch_size snake_case_ : Dict = num_channels snake_case_ : List[Any] = image_size snake_case_ : Union[str, Any] = min_resolution snake_case_ : Tuple = max_resolution snake_case_ : str = do_resize snake_case_ : Tuple = size snake_case_ : int = do_center_crop snake_case_ : Tuple = crop_size snake_case_ : int = do_normalize snake_case_ : Optional[Any] = image_mean snake_case_ : List[str] = image_std snake_case_ : str = do_reduce_labels def lowerCamelCase (self ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , 
split='''test''' ) snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] ) snake_case_ : str = Image.open(dataset[1]['''file'''] ) return image, map def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] ) snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] ) snake_case_ : List[str] = Image.open(ds[2]['''file'''] ) snake_case_ : str = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = BeitImageProcessingTester(self ) @property def lowerCamelCase (self ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''size''' ) ) self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) ) self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) ) self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) snake_case_ : Union[str, Any] = 
self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' pass def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) snake_case_ : Union[str, Any] = [] for image in image_inputs: 
self.assertIsInstance(__magic_name__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test not batched input (PIL images) snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs() snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( 
encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched input (PIL images) snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs() snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs() snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 150 ) snake_case_ : List[Any] = True snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 )
60
0
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class lowerCAmelCase__ : A_ : str = field( metadata={'help': 'The output directory where the model will be written.'} , ) A_ : str = field( metadata={ 'help': ( 'The encoder model checkpoint for weights initialization.' 'Don\'t set if you want to train an encoder model from scratch.' ) } , ) A_ : str = field( metadata={ 'help': ( 'The decoder model checkpoint for weights initialization.' 'Don\'t set if you want to train a decoder model from scratch.' ) } , ) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} ) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} ) def lowerCamelCase_ ( ) -> Optional[Any]: '''simple docstring''' A = HfArgumentParser((ModelArguments,) ) ((A) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: A = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: A = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: A = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: A = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed A = True A = True A = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , 
encoder_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens A = decoder_config.decoder_start_token_id A = decoder_config.pad_token_id if decoder_start_token_id is None: A = decoder_config.bos_token_id if pad_token_id is None: A = decoder_config.eos_token_id # This is necessary to make Flax's generate() work A = decoder_config.eos_token_id A = decoder_start_token_id A = pad_token_id A = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) A = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) A = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
106
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. 
Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. ])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any: '''simple docstring''' snake_case_ : List[Any] = mean_squared_error( 
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ ) return {"mse": mse}
60
0
'''simple docstring''' import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _UpperCAmelCase : Dict = '''\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ''' _UpperCAmelCase : Any = '''\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve ''' _UpperCAmelCase : Dict = ''' Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: "c" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric(\'mauve\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/krishnap25/mauve', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('string', id='sequence' ), 'references': datasets.Value('string', id='sequence' ), } ), codebase_urls=['https://github.com/krishnap25/mauve'], reference_urls=[ 'https://arxiv.org/abs/2102.01454', 'https://github.com/krishnap25/mauve', ], ) def __UpperCAmelCase ( self : int, UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Tuple=None, UpperCamelCase__ : str=None, UpperCamelCase__ : Any="auto", UpperCamelCase__ : Optional[int]=-1, UpperCamelCase__ : Dict=0.9, UpperCamelCase__ : List[str]=5, UpperCamelCase__ : Dict=5_00, UpperCamelCase__ : Optional[int]="gpt2-large", UpperCamelCase__ : Dict=-1, UpperCamelCase__ : Union[str, Any]=10_24, UpperCamelCase__ : str=25, UpperCamelCase__ : Any=5, UpperCamelCase__ : str=True, UpperCamelCase__ : List[Any]=25, ) -> Tuple: _A = compute_mauve( p_text=UpperCamelCase__, q_text=UpperCamelCase__, p_features=UpperCamelCase__, q_features=UpperCamelCase__, p_tokens=UpperCamelCase__, q_tokens=UpperCamelCase__, num_buckets=UpperCamelCase__, pca_max_data=UpperCamelCase__, kmeans_explained_var=UpperCamelCase__, 
kmeans_num_redo=UpperCamelCase__, kmeans_max_iter=UpperCamelCase__, featurize_model_name=UpperCamelCase__, device_id=UpperCamelCase__, max_text_length=UpperCamelCase__, divergence_curve_discretization_size=UpperCamelCase__, mauve_scaling_factor=UpperCamelCase__, verbose=UpperCamelCase__, seed=UpperCamelCase__, ) return out
107
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class __lowerCAmelCase : lowerCamelCase_ : Any = None def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __magic_name__ ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(__magic_name__ ) snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Tuple = self.feature_extraction_class() self.assertIsNotNone(__magic_name__ )
60
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _SCREAMING_SNAKE_CASE ( __snake_case ) -> List[Any]: _UpperCAmelCase , _UpperCAmelCase = image.size _UpperCAmelCase , _UpperCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32 _UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) _UpperCAmelCase = np.array(__snake_case ).astype(np.floataa ) / 255.0 _UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 ) _UpperCAmelCase = torch.from_numpy(__snake_case ) return 2.0 * image - 1.0 class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : VQModel , lowerCamelCase : UNetaDModel , lowerCamelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ) -> Any: """simple docstring""" super().__init__() self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase ) @torch.no_grad() def __call__( self : Tuple , lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 100 , lowerCamelCase : Optional[float] = 0.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" if isinstance(lowerCamelCase , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(lowerCamelCase , torch.Tensor ): 
_UpperCAmelCase = image.shape[0] else: raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}""" ) if isinstance(lowerCamelCase , PIL.Image.Image ): _UpperCAmelCase = preprocess(lowerCamelCase ) _UpperCAmelCase , _UpperCAmelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width) _UpperCAmelCase = next(self.unet.parameters() ).dtype _UpperCAmelCase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase ) _UpperCAmelCase = image.to(device=self.device , dtype=lowerCamelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowerCamelCase , device=self.device ) _UpperCAmelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta for t in self.progress_bar(lowerCamelCase ): # concat latents and low resolution image in the channel dimension. 
_UpperCAmelCase = torch.cat([latents, image] , dim=1 ) _UpperCAmelCase = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) # predict the noise residual _UpperCAmelCase = self.unet(lowerCamelCase , lowerCamelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample # decode the image latents with the VQVAE _UpperCAmelCase = self.vqvae.decode(lowerCamelCase ).sample _UpperCAmelCase = torch.clamp(lowerCamelCase , -1.0 , 1.0 ) _UpperCAmelCase = image / 2 + 0.5 _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase )
108
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase : lowerCamelCase_ : str lowerCamelCase_ : str = None @staticmethod def lowerCamelCase () -> Any: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' if not self.is_available(): raise RuntimeError( F'''You picked the {self.name} backend, but it is not installed. 
Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase (cls ) -> List[Any]: '''simple docstring''' return F'''`pip install {cls.pip_package or cls.name}`''' class __lowerCAmelCase ( _a ): lowerCamelCase_ : Optional[int] = '''optuna''' @staticmethod def lowerCamelCase () -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return default_hp_space_optuna(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Any = '''ray''' lowerCamelCase_ : List[str] = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase () -> List[Any]: '''simple docstring''' return is_ray_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return default_hp_space_ray(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''sigopt''' @staticmethod def lowerCamelCase () -> Optional[int]: '''simple docstring''' return is_sigopt_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]: '''simple docstring''' return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' return default_hp_space_sigopt(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''wandb''' @staticmethod def lowerCamelCase () -> Dict: '''simple docstring''' return is_wandb_available() 
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]: '''simple docstring''' return default_hp_space_wandb(__magic_name__ ) lowerCAmelCase_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_UpperCamelCase ) > 0: snake_case_ : Dict = available_backends[0].name if len(_UpperCamelCase ) > 1: logger.info( f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( '''No hyperparameter search backend available.\n''' + '''\n'''.join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
60
0
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : Optional[int] = 'autoformer' __UpperCamelCase : int = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : Optional[int] ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : str = "student_t" ,lowerCamelCase : str = "nll" ,lowerCamelCase : int = 1 ,lowerCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] ,lowerCamelCase : bool = True ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : Optional[List[int]] = None ,lowerCamelCase : Optional[List[int]] = None ,lowerCamelCase : int = 64 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 32 ,lowerCamelCase : int = 32 ,lowerCamelCase : str = "gelu" ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : int = 100 ,lowerCamelCase : float = 0.02 ,lowerCamelCase : bool = True ,lowerCamelCase : str=True ,lowerCamelCase : int = 10 ,lowerCamelCase : int = 25 ,lowerCamelCase : int = 3 ,**lowerCamelCase : Dict ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = prediction_length __SCREAMING_SNAKE_CASE = context_length if context_length is not None else prediction_length __SCREAMING_SNAKE_CASE = distribution_output __SCREAMING_SNAKE_CASE = loss __SCREAMING_SNAKE_CASE = input_size __SCREAMING_SNAKE_CASE = num_time_features __SCREAMING_SNAKE_CASE = lags_sequence __SCREAMING_SNAKE_CASE = scaling 
__SCREAMING_SNAKE_CASE = num_dynamic_real_features __SCREAMING_SNAKE_CASE = num_static_real_features __SCREAMING_SNAKE_CASE = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(lowerCamelCase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) __SCREAMING_SNAKE_CASE = cardinality else: __SCREAMING_SNAKE_CASE = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(lowerCamelCase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) __SCREAMING_SNAKE_CASE = embedding_dimension else: __SCREAMING_SNAKE_CASE = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality] __SCREAMING_SNAKE_CASE = num_parallel_samples # Transformer architecture configuration __SCREAMING_SNAKE_CASE = input_size * len(self.lags_sequence ) + self._number_of_features __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = decoder_layerdrop __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = use_cache # Autoformer __SCREAMING_SNAKE_CASE = label_length __SCREAMING_SNAKE_CASE = moving_average __SCREAMING_SNAKE_CASE = autocorrelation_factor super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase ) @property def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return ( sum(self.embedding_dimension ) + 
self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
109
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list: """simple docstring""" snake_case_ : Tuple = len(_UpperCamelCase ) snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): snake_case_ : Any = y_points[i] for i in range(2 , _UpperCamelCase ): for j in range(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Optional[int] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
60
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig class a ( lowercase ): UpperCamelCase : Union[str, Any] = """bert-generation""" def __init__( self , UpperCamelCase_=50_358 , UpperCamelCase_=1_024 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=4_096 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_="absolute" , UpperCamelCase_=True , **UpperCamelCase_ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) UpperCAmelCase__ : Optional[Any] = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : Optional[Any] = num_hidden_layers UpperCAmelCase__ : Dict = num_attention_heads UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : Optional[Any] = intermediate_size UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : List[Any] = max_position_embeddings UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : Optional[int] = layer_norm_eps UpperCAmelCase__ : Union[str, Any] = position_embedding_type UpperCAmelCase__ : Dict = use_cache
110
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
0
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class lowercase__ ( _a ): def UpperCAmelCase ( self )-> Dict: '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' lowerCAmelCase__ = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']} return Dataset.from_dict(__UpperCAmelCase ) def UpperCAmelCase ( self )-> str: '''simple docstring''' lowerCAmelCase__ = self._create_example_records() lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase ) self.assertListEqual(dset.column_names , ["col_1", "col_2"] ) for i, r in enumerate(__UpperCAmelCase ): self.assertDictEqual(__UpperCAmelCase , example_records[i] ) def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' lowerCAmelCase__ = self._create_example_records() lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase ) lowerCAmelCase__ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def UpperCAmelCase ( self )-> Any: # checks what happens with missing columns '''simple docstring''' lowerCAmelCase__ = [{'''col_1''': 1}, {'''col_2''': '''x'''}] lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase ) self.assertDictEqual(dset[0] , {"col_1": 1} ) self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns def UpperCAmelCase ( self )-> Optional[int]: # checks if the type can be inferred from the second record '''simple docstring''' lowerCAmelCase__ = [{'''col_1''': []}, {'''col_1''': [1, 2]}] lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase ) self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) ) def UpperCAmelCase ( self )-> List[str]: '''simple docstring''' lowerCAmelCase__ = Dataset.from_list([] ) self.assertEqual(len(__UpperCAmelCase ) , 0 
) self.assertListEqual(dset.column_names , [] )
339
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" return getitem, k def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" return setitem, k, v def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple: """simple docstring""" return delitem, k def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str: """simple docstring""" try: return fun(_UpperCamelCase , *_UpperCamelCase ), None except Exception as e: return None, e lowerCAmelCase_ = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) lowerCAmelCase_ = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] lowerCAmelCase_ = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] lowerCAmelCase_ = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] lowerCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] lowerCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( '''operations''' , ( pytest.param(_add_items , id='''add items''' ), pytest.param(_overwrite_items , id='''overwrite items''' ), pytest.param(_delete_items , id='''delete items''' ), pytest.param(_access_absent_items , id='''access absent items''' ), pytest.param(_add_with_resize_up , id='''add with resize up''' ), pytest.param(_add_with_resize_down , id='''add with resize down''' ), ) , ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" snake_case_ : Any = HashMap(initial_block_size=4 ) snake_case_ : Union[str, Any] = {} for _, (fun, *args) in enumerate(_UpperCamelCase ): snake_case_ , snake_case_ : str = 
_run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) assert my_res == py_res assert str(_UpperCamelCase ) == str(_UpperCamelCase ) assert set(_UpperCamelCase ) == set(_UpperCamelCase ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) assert set(my.items() ) == set(py.items() ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" def is_public(_UpperCamelCase ) -> bool: return not name.startswith('''_''' ) snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )} snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )} assert dict_public_names > hash_public_names
60
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : List[Any] = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class lowerCAmelCase__ ( _a ): '''simple docstring''' lowercase_ = '''wavlm''' def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 2, 2, 2, 2, 2) , lowercase__=(1_0, 3, 3, 3, 3, 2, 2) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=3_2_0 , lowercase__=8_0_0 , lowercase__=False , lowercase__=True , lowercase__=0.05 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=3_2_0 , lowercase__=2 , lowercase__=0.1 , lowercase__=1_0_0 , lowercase__=2_5_6 , lowercase__=2_5_6 , lowercase__=0.1 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase__=(5, 3, 3, 1, 1) , lowercase__=(1, 2, 3, 1, 1) , lowercase__=5_1_2 , lowercase__=8_0 , lowercase__=0 , lowercase__=1 , lowercase__=2 , lowercase__=False , lowercase__=3 , lowercase__=2 , lowercase__=3 , lowercase__=None , **lowercase__ , ): '''simple docstring''' super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) __A =hidden_size __A =feat_extract_norm __A =feat_extract_activation __A =list(lowercase__ ) __A =list(lowercase__ ) __A =list(lowercase__ ) __A =conv_bias __A =num_buckets __A 
=max_bucket_distance __A =num_conv_pos_embeddings __A =num_conv_pos_embedding_groups __A =len(self.conv_dim ) __A =num_hidden_layers __A =intermediate_size __A =hidden_act __A =num_attention_heads __A =hidden_dropout __A =attention_dropout __A =activation_dropout __A =feat_proj_dropout __A =final_dropout __A =layerdrop __A =layer_norm_eps __A =initializer_range __A =num_ctc_classes __A =vocab_size __A =do_stable_layer_norm __A =use_weighted_layer_sum __A =classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __A =apply_spec_augment __A =mask_time_prob __A =mask_time_length __A =mask_time_min_masks __A =mask_feature_prob __A =mask_feature_length # parameters for pretraining with codevector quantized representations __A =num_codevectors_per_group __A =num_codevector_groups __A =contrastive_logits_temperature __A =num_negatives __A =codevector_dim __A =proj_codevector_dim __A =diversity_loss_weight # ctc loss __A =ctc_loss_reduction __A =ctc_zero_infinity # adapter __A =add_adapter __A =adapter_kernel_size __A =adapter_stride __A =num_adapter_layers __A =output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __A =classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. 
__A =list(lowercase__ ) __A =list(lowercase__ ) __A =list(lowercase__ ) __A =xvector_output_dim @property def __UpperCamelCase ( self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
184
from __future__ import annotations def lowerCamelCase_ ( _UpperCamelCase ) -> list: """simple docstring""" if len(_UpperCamelCase ) == 0: return [] snake_case_ , snake_case_ : Dict = min(_UpperCamelCase ), max(_UpperCamelCase ) snake_case_ : List[str] = int(max_value - min_value ) + 1 snake_case_ : list[list] = [[] for _ in range(_UpperCamelCase )] for i in my_list: buckets[int(i - min_value )].append(_UpperCamelCase ) return [v for bucket in buckets for v in sorted(_UpperCamelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
60
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : List[str] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class __UpperCAmelCase( _a , unittest.TestCase ): """simple docstring""" __lowerCamelCase = GPTSwaTokenizer __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ : List[Any]= GPTSwaTokenizer(snake_case__ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= '''This is a test''' lowercase__ : Tuple= '''This is a test''' return input_text, output_text def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= '''<s>''' lowercase__ : int= 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(snake_case__ ) , 2000 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= GPTSwaTokenizer(snake_case__ ) lowercase__ : Optional[int]= tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) 
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [465, 287, 265, 631, 842] ) lowercase__ : int= tokenizer.tokenize("I was born in 92000, and this is falsé." ) # fmt: off self.assertListEqual( snake_case__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on lowercase__ : Union[str, Any]= tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) lowercase__ : Union[str, Any]= tokenizer.convert_ids_to_tokens(snake_case__ ) # fmt: off self.assertListEqual( snake_case__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= GPTSwaTokenizer(snake_case__ ) lowercase__ : Tuple= ['''This is a test''', '''I was born in 92000, and this is falsé.'''] lowercase__ : str= [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(snake_case__ , snake_case__ ): self.assertListEqual(tokenizer.encode_fast(snake_case__ ) , snake_case__ ) # Test that decode_fast returns the input text for text, token_ids in zip(snake_case__ , snake_case__ ): self.assertEqual(tokenizer.decode_fast(snake_case__ ) , snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. 
Cool''', ] # fmt: off lowercase__ : Optional[Any]= {'''input_ids''': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=snake_case__ , )
218
import tensorflow as tf from ...tf_utils import shape_list class __lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict: '''simple docstring''' super().__init__(**__magic_name__ ) snake_case_ : List[Any] = vocab_size snake_case_ : Dict = d_embed snake_case_ : Union[str, Any] = d_proj snake_case_ : str = cutoffs + [vocab_size] snake_case_ : int = [0] + self.cutoffs snake_case_ : Optional[int] = div_val snake_case_ : int = self.cutoffs[0] snake_case_ : Any = len(self.cutoffs ) - 1 snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters snake_case_ : str = keep_order snake_case_ : int = [] snake_case_ : Union[str, Any] = [] def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' if self.n_clusters > 0: snake_case_ : Tuple = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' ) snake_case_ : Optional[Any] = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: snake_case_ : List[str] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , ) self.out_projs.append(__magic_name__ ) else: self.out_projs.append(__magic_name__ ) snake_case_ : Optional[Any] = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , ) snake_case_ : List[str] = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): snake_case_ , 
snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i) snake_case_ : int = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' ) self.out_projs.append(__magic_name__ ) snake_case_ : int = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , ) snake_case_ : Any = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) super().build(__magic_name__ ) @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = x if proj is not None: snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ ) return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = shape_list(__magic_name__ ) snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype ) snake_case_ : Dict = tf.stack([r, target] , 1 ) return tf.gather_nd(__magic_name__ , __magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = 0 if self.n_clusters == 0: snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ ) snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 ) else: snake_case_ : Optional[int] = 
shape_list(__magic_name__ ) snake_case_ : int = [] snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: snake_case_ : str = (target >= l_idx) & (target < r_idx) snake_case_ : Dict = tf.where(__magic_name__ ) snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx if self.div_val == 1: snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx] snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx] else: snake_case_ : Union[str, Any] = self.out_layers[i][0] snake_case_ : int = self.out_layers[i][1] if i == 0: snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 ) snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 ) snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] ) snake_case_ : Any = tf.nn.log_softmax(__magic_name__ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ ) else: snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] ) snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ ) snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__magic_name__ ) if target is not None: snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__magic_name__ , 
-cur_logprob , shape_list(__magic_name__ ) ) snake_case_ : str = tf.concat(__magic_name__ , axis=-1 ) if target is not None: if return_mean: snake_case_ : int = tf.reduce_mean(__magic_name__ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__magic_name__ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' ) return out
60
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCamelCase : List[Any] = logging.get_logger(__name__)

# NOTE(review): this second assignment shadows the logger above; nothing in
# this block reads either name afterwards, so it is kept as-is.
__lowerCamelCase : Optional[int] = {
    """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class SCREAMING_SNAKE_CASE__ ( _a ):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Holds per-stage hyper-parameters (three stages by default); list-valued
    arguments carry one entry per stage.  All values are stored as instance
    attributes so that ``PretrainedConfig`` serialization picks them up.
    """

    a_ = '''cvt'''  # model_type identifier used by the auto classes

    # BUG FIX: every parameter was named ``__A`` (duplicate argument names are
    # a SyntaxError) and the body bound each value to a throwaway local, never
    # storing anything on ``self``.  Parameter names are restored from the
    # names the body reads, and values are persisted as attributes.
    # List defaults are kept to match the upstream signature; they are never
    # mutated here, so the shared-mutable-default pitfall does not bite.
    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
297
import requests


def lowerCamelCase_(_UpperCamelCase, message_body) -> None:
    """Post ``message_body`` to a Slack incoming-webhook URL.

    Args:
        _UpperCamelCase: the Slack webhook URL to POST to.
        message_body: the message text to send.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    # BUG FIX: both parameters were named ``_UpperCamelCase`` (duplicate
    # argument names are a SyntaxError) and the body read an unbound
    # ``message_body``; the second parameter is restored to that name.
    snake_case_ = {'''Content-Type''': '''application/json'''}
    # timeout added so a hung Slack endpoint cannot block the caller forever
    response = requests.post(
        _UpperCamelCase, json={'''text''': message_body}, headers=snake_case_, timeout=60
    )
    if response.status_code != 200:
        # BUG FIX: the error text was built but the *URL* was raised instead
        # of the message; raise the constructed message.
        msg = (
            '''Request to slack returned an error '''
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # BUG FIX: this called an undefined ``send_slack_message`` and passed the
    # message body where the URL belongs; the function above is
    # ``lowerCamelCase_(url, body)``.
    lowerCamelCase_('''<SLACK CHANNEL URL>''', '''<YOUR MESSAGE BODY>''')
60
0
# NOTE(review): identifiers in this block appear machine-mangled — several
# distinct methods are all named ``A__`` (later class-body defs shadow the
# earlier ones), parameters reuse one name, and bodies read names
# (``labels``, ``kwargs``, ``inputs``, ...) that are never bound, so most
# methods would raise NameError if called.  Code is left byte-identical;
# comments below describe the apparent intent only.
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


_UpperCamelCase : int = logging.get_logger(__name__)


class UpperCAmelCase__ ( _a ):
    """Argument handler for zero-shot classification.

    Normalises candidate labels and expands every (sequence, label)
    combination into a premise/hypothesis pair via a format template.
    """

    def A__ ( self ,A__ ):
        # Apparently: split a comma-separated label string into a clean list.
        # NOTE(review): reads ``labels``, which is never bound here — confirm.
        if isinstance(A__ ,A__ ):
            _A : Optional[Any] = [label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels

    def __call__( self ,A__ ,A__ ,A__ ):
        # Validate inputs, then build one [sequence, hypothesis] pair per
        # (sequence, label) combination.
        if len(A__ ) == 0 or len(A__ ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            # A template without a ``{}`` placeholder cannot embed the label.
            raise ValueError(
                (
                    '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(A__ ) )
        if isinstance(A__ ,A__ ):
            # A single string sequence is wrapped into a one-element list.
            _A : Union[str, Any] = [sequences]
        _A : Dict = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(A__ )] for label in labels] )
        return sequence_pairs, sequences


@add_end_docstrings(_a )
class UpperCAmelCase__ ( _a ):
    """NLI-based zero-shot classification pipeline (chunked).

    Each candidate label is turned into an NLI hypothesis; the entailment
    score of (sequence, hypothesis) is used as the label score.
    """

    def __init__( self ,A__=ZeroShotClassificationArgumentHandler() ,*A__ ,**A__ ):
        # NOTE(review): mutable/stateful default argument (handler instance)
        # is shared across pipeline instances — presumably intentional here.
        _A : str = args_parser
        super().__init__(*A__ ,**A__ )
        if self.entailment_id == -1:
            logger.warning(
                '''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
                '''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )

    @property
    def A__ ( self ):
        # Find the label id whose name starts with "entail"; -1 if absent.
        # NOTE(review): ``labelaid`` looks like a mangled ``label2id`` — confirm.
        for label, ind in self.model.config.labelaid.items():
            if label.lower().startswith('''entail''' ):
                return ind
        return -1

    def A__ ( self ,A__ ,A__=True ,A__=True ,A__=TruncationStrategy.ONLY_FIRST ,**A__ ):
        # Tokenize the (premise, hypothesis) pairs, retrying without
        # truncation when the tokenizer rejects a too-short input.
        _A : List[Any] = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
                ''' `pad_token=eos_token`''' )
            _A : List[str] = self.tokenizer.eos_token
        try:
            _A : List[str] = self.tokenizer(
                A__ ,add_special_tokens=A__ ,return_tensors=A__ ,padding=A__ ,truncation=A__ ,)
        except Exception as e:
            if "too short" in str(A__ ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                _A : Union[str, Any] = self.tokenizer(
                    A__ ,add_special_tokens=A__ ,return_tensors=A__ ,padding=A__ ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,)
            else:
                raise e
        return inputs

    def A__ ( self ,**A__ ):
        # Split caller kwargs into (preprocess, forward, postprocess) params;
        # also maps the deprecated ``multi_class`` onto ``multi_label``.
        if kwargs.get('''multi_class''' ,A__ ) is not None:
            _A : Optional[int] = kwargs['''multi_class''']
            logger.warning(
                '''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
                '''`multi_class` will be removed in a future version of Transformers.''' )
        _A : Optional[int] = {}
        if "candidate_labels" in kwargs:
            _A : Any = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
        if "hypothesis_template" in kwargs:
            _A : str = kwargs['''hypothesis_template''']
        _A : Optional[int] = {}
        if "multi_label" in kwargs:
            _A : Optional[int] = kwargs['''multi_label''']
        return preprocess_params, {}, postprocess_params

    def __call__( self ,A__ ,*A__ ,**A__ ,):
        # Accept at most one extra positional arg (legacy candidate_labels).
        if len(A__ ) == 0:
            pass
        elif len(A__ ) == 1 and "candidate_labels" not in kwargs:
            _A : Any = args[0]
        else:
            raise ValueError(f"""Unable to understand extra arguments {args}""" )
        return super().__call__(A__ ,**A__ )

    def A__ ( self ,A__ ,A__=None ,A__="This example is {}." ):
        # preprocess: yield one tokenized model input per candidate label,
        # flagging the final chunk with ``is_last``.
        _A : List[str] = self._args_parser(A__ ,A__ ,A__ )
        for i, (candidate_label, sequence_pair) in enumerate(zip(A__ ,A__ ) ):
            _A : List[str] = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(A__ ) - 1,
                **model_input,
            }

    def A__ ( self ,A__ ):
        # _forward: run the model on one chunk, carrying label/sequence
        # metadata through alongside the raw model outputs.
        _A : int = inputs['''candidate_label''']
        _A : Dict = inputs['''sequence''']
        _A : Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
        _A : int = self.model(**A__ )
        _A : List[str] = {
            '''candidate_label''': candidate_label,
            '''sequence''': sequence,
            '''is_last''': inputs['''is_last'''],
            **outputs,
        }
        return model_outputs

    def A__ ( self ,A__ ,A__=False ):
        # postprocess: stack per-label logits and turn them into scores.
        _A : str = [outputs['''candidate_label'''] for outputs in model_outputs]
        _A : Dict = [outputs['''sequence'''] for outputs in model_outputs]
        _A : str = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
        _A : Union[str, Any] = logits.shape[0]
        _A : Optional[int] = len(A__ )
        _A : Optional[Any] = N // n
        _A : List[str] = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(A__ ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            _A : Optional[Any] = self.entailment_id
            _A : Optional[int] = -1 if entailment_id == 0 else 0
            _A : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
            _A : Tuple = np.exp(A__ ) / np.exp(A__ ).sum(-1 ,keepdims=A__ )
            _A : Optional[int] = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            _A : Dict = reshaped_outputs[..., self.entailment_id]
            _A : Union[str, Any] = np.exp(A__ ) / np.exp(A__ ).sum(-1 ,keepdims=A__ )
        # Labels sorted by descending score for the first sequence.
        _A : Dict = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
206
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> list of public symbols.
# BUG FIX: the original rebound this same name to each optional symbol list
# (clobbering the dict), then passed an undefined ``_import_structure`` to
# ``_LazyModule`` (NameError at import time), and never installed the lazy
# module in ``sys.modules`` so laziness had no effect.  The dict is now
# filled in place and wired up per the standard transformers pattern.
lowerCAmelCase_ = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_['''modeling_tf_speech_to_text'''] = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_['''modeling_speech_to_text'''] = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime everything is lazy.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCAmelCase_, module_spec=__spec__)
60
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class __magic_name__ ( unittest.TestCase ): def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = BlipImageProcessor() _lowerCAmelCase = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) _lowerCAmelCase = BlipaProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self , **__magic_name__ ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def _lowerCamelCase ( self , **__magic_name__ ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def _lowerCamelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _lowerCAmelCase = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowerCAmelCase = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) _lowerCAmelCase = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__magic_name__ , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) _lowerCAmelCase = self.prepare_image_inputs() _lowerCAmelCase = image_processor(__magic_name__ , return_tensors='np' ) _lowerCAmelCase = processor(images=__magic_name__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) _lowerCAmelCase = '''lower newer''' _lowerCAmelCase = processor(text=__magic_name__ ) _lowerCAmelCase = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) _lowerCAmelCase = '''lower newer''' _lowerCAmelCase = self.prepare_image_inputs() _lowerCAmelCase = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def _lowerCamelCase ( self ): """simple 
docstring""" _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) _lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase = processor.batch_decode(__magic_name__ ) _lowerCAmelCase = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) _lowerCAmelCase = '''lower newer''' _lowerCAmelCase = self.prepare_image_inputs() _lowerCAmelCase = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
589
# OwlViT configuration classes (text / vision / combined) plus an ONNX export
# config.
# NOTE(review): this block is machine-mangled and not valid Python as-is:
# ``__init__`` parameters are all named ``__magic_name__`` (duplicate argument
# names are a SyntaxError), annotated tuple targets like
# ``snake_case_ , snake_case_ : str = ...`` are also SyntaxErrors, bodies read
# names never bound (``vocab_size``, ``config_dict``, ...), values are bound
# to throwaway locals instead of ``self`` attributes, and three classes share
# the name ``__lowerCAmelCase`` (later defs shadow earlier ones).  Code is
# left byte-identical; comments describe apparent intent only.
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

# NOTE(review): this reassignment clobbers the logger bound just above;
# the classmethods below call ``logger.warning`` on this dict as a result.
lowerCAmelCase_ = {
    '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
    '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
    '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}


class __lowerCAmelCase ( _a ):
    """Apparent intent: text-encoder configuration for OwlViT."""

    lowerCamelCase_ : Tuple = '''owlvit_text_model'''

    def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
        # Presumably: vocab_size, hidden_size, intermediate_size, layer/head
        # counts, max positions, activation, eps, dropout, init params and the
        # pad/bos/eos token ids — TODO confirm against upstream OwlViTTextConfig.
        super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
        snake_case_ : int = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : List[Any] = intermediate_size
        snake_case_ : str = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Optional[Any] = max_position_embeddings
        snake_case_ : str = hidden_act
        snake_case_ : Union[str, Any] = layer_norm_eps
        snake_case_ : Dict = attention_dropout
        snake_case_ : Union[str, Any] = initializer_range
        snake_case_ : int = initializer_factor

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
        # Load a config dict and, when it came from a combined OwlViTConfig,
        # descend into its ``text_config`` sub-dict before building.
        cls._set_token_in_kwargs(__magic_name__ )
        snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            snake_case_ : str = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__magic_name__ , **__magic_name__ )


class __lowerCAmelCase ( _a ):
    """Apparent intent: vision-encoder configuration for OwlViT."""

    lowerCamelCase_ : int = '''owlvit_vision_model'''

    def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
        super().__init__(**__magic_name__ )
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : Union[str, Any] = intermediate_size
        snake_case_ : Union[str, Any] = num_hidden_layers
        snake_case_ : Tuple = num_attention_heads
        snake_case_ : List[Any] = num_channels
        snake_case_ : Union[str, Any] = image_size
        snake_case_ : Dict = patch_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : Tuple = layer_norm_eps
        snake_case_ : Dict = attention_dropout
        snake_case_ : List[str] = initializer_range
        snake_case_ : List[Any] = initializer_factor

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
        # Same pattern as the text config, but descends into ``vision_config``.
        cls._set_token_in_kwargs(__magic_name__ )
        snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            snake_case_ : str = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__magic_name__ , **__magic_name__ )


class __lowerCAmelCase ( _a ):
    """Apparent intent: combined OwlViT configuration (text + vision)."""

    # NOTE(review): both class attributes share one name; the second (True,
    # presumably ``is_composition``) shadows the first ('owlvit',
    # presumably ``model_type``).
    lowerCamelCase_ : int = '''owlvit'''
    lowerCamelCase_ : Optional[int] = True

    def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
        super().__init__(**__magic_name__ )
        if text_config is None:
            snake_case_ : Tuple = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
        if vision_config is None:
            snake_case_ : str = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
        snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
        snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
        snake_case_ : Any = projection_dim
        snake_case_ : Union[str, Any] = logit_scale_init_value
        snake_case_ : str = return_dict
        snake_case_ : Any = 1.0

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(__magic_name__ )
        snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__magic_name__ , **__magic_name__ )

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
        # Apparent intent: build a combined config from separate text and
        # vision configs (``from_text_vision_configs``).
        snake_case_ : Optional[int] = {}
        snake_case_ : Union[str, Any] = text_config
        snake_case_ : Optional[Any] = vision_config
        return cls.from_dict(__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self ) -> str:
        # Serialize to a plain dict, expanding the nested sub-configs.
        snake_case_ : Dict = copy.deepcopy(self.__dict__ )
        snake_case_ : List[Any] = self.text_config.to_dict()
        snake_case_ : List[Any] = self.vision_config.to_dict()
        snake_case_ : Tuple = self.__class__.model_type
        return output


class __lowerCAmelCase ( _a ):
    """Apparent intent: ONNX export configuration for OwlViT."""

    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis spec for the model inputs.
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis spec for the model outputs.
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ] )

    @property
    def lowerCamelCase (self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
        # Merge dummy text inputs (tokenizer) with dummy image inputs
        # (image processor) for export tracing.
        snake_case_ : Dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
        snake_case_ : List[str] = super().generate_dummy_inputs(
            processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
        return {**text_input_dict, **image_input_dict}

    @property
    def lowerCamelCase (self ) -> int:
        # Minimum ONNX opset supported by this export.
        return 14
60
0
# Conditional DETR configuration plus its ONNX export config.
# NOTE(review): machine-mangled and not valid Python as-is: every ``__init__``
# parameter is named ``lowerCamelCase__`` (duplicate argument names are a
# SyntaxError), the body reads ~30 names that are never bound
# (``use_timm_backbone``, ``num_queries``, ...), values are bound to
# throwaway locals instead of ``self`` attributes, and the three class
# attributes share one name.  Code is left byte-identical; comments describe
# apparent intent only.
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)

# NOTE(review): clobbers the logger bound just above; nothing later reads it.
__SCREAMING_SNAKE_CASE : Tuple = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}


class __magic_name__ ( _a ):
    """Apparent intent: configuration for the Conditional DETR model."""

    # NOTE(review): three class attributes share one name; only the last
    # (the attribute map, presumably ``attribute_map``) survives, shadowing
    # what look like ``model_type`` and ``keys_to_ignore_at_inference``.
    _lowerCAmelCase = '''conditional_detr'''
    _lowerCAmelCase = ['''past_key_values''']
    _lowerCAmelCase = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self : int , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[Any]=3_0_0 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Optional[Any]=2_0_4_8 , lowerCamelCase__ : int=8 , lowerCamelCase__ : Union[str, Any]=6 , lowerCamelCase__ : Dict=2_0_4_8 , lowerCamelCase__ : Union[str, Any]=8 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : List[Any]="relu" , lowerCamelCase__ : Optional[Any]=2_5_6 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0_2 , lowerCamelCase__ : Tuple=1.0 , lowerCamelCase__ : str=False , lowerCamelCase__ : Dict="sine" , lowerCamelCase__ : Optional[Any]="resnet50" , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Union[str, Any]=5 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[Any]=1 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Optional[Any]=5 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Optional[Any]=0.2_5 , **lowerCamelCase__ : Union[str, Any] , ):
        # Timm backbone and explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                lowerCAmelCase : Union[str, Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
                # A dict backbone_config is resolved to its config class.
                lowerCAmelCase : Union[str, Any] = backbone_config.get('''model_type''' )
                lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
                lowerCAmelCase : List[Any] = config_class.from_dict(lowerCamelCase__ )
        # Presumably each local below was ``self.<name> = <name>`` upstream.
        lowerCAmelCase : Tuple = use_timm_backbone
        lowerCAmelCase : Tuple = backbone_config
        lowerCAmelCase : Optional[int] = num_channels
        lowerCAmelCase : Dict = num_queries
        lowerCAmelCase : List[str] = d_model
        lowerCAmelCase : List[Any] = encoder_ffn_dim
        lowerCAmelCase : List[str] = encoder_layers
        lowerCAmelCase : Dict = encoder_attention_heads
        lowerCAmelCase : str = decoder_ffn_dim
        lowerCAmelCase : Tuple = decoder_layers
        lowerCAmelCase : str = decoder_attention_heads
        lowerCAmelCase : Dict = dropout
        lowerCAmelCase : Dict = attention_dropout
        lowerCAmelCase : List[str] = activation_dropout
        lowerCAmelCase : Union[str, Any] = activation_function
        lowerCAmelCase : Any = init_std
        lowerCAmelCase : Any = init_xavier_std
        lowerCAmelCase : List[str] = encoder_layerdrop
        lowerCAmelCase : Tuple = decoder_layerdrop
        lowerCAmelCase : Optional[Any] = encoder_layers
        lowerCAmelCase : List[str] = auxiliary_loss
        lowerCAmelCase : Any = position_embedding_type
        lowerCAmelCase : str = backbone
        lowerCAmelCase : List[Any] = use_pretrained_backbone
        lowerCAmelCase : List[str] = dilation
        # Hungarian matcher
        lowerCAmelCase : Any = class_cost
        lowerCAmelCase : Optional[int] = bbox_cost
        lowerCAmelCase : List[Any] = giou_cost
        # Loss coefficients
        lowerCAmelCase : Optional[Any] = mask_loss_coefficient
        lowerCAmelCase : Optional[Any] = dice_loss_coefficient
        lowerCAmelCase : Any = cls_loss_coefficient
        lowerCAmelCase : str = bbox_loss_coefficient
        lowerCAmelCase : Optional[int] = giou_loss_coefficient
        lowerCAmelCase : Tuple = focal_alpha
        super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )

    @property
    def _A ( self : List[Any] ):
        # Convenience alias (presumably ``num_attention_heads``).
        return self.encoder_attention_heads

    @property
    def _A ( self : Optional[int] ):
        # Convenience alias (presumably ``hidden_size``).
        return self.d_model

    def _A ( self : List[str] ):
        # Serialize to a plain dict, expanding the nested backbone config.
        lowerCAmelCase : int = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            lowerCAmelCase : str = self.backbone_config.to_dict()
        lowerCAmelCase : Optional[int] = self.__class__.model_type
        return output


class __magic_name__ ( _a ):
    """Apparent intent: ONNX export configuration for Conditional DETR."""

    # Minimum torch-onnx version required for export.
    _lowerCAmelCase = version.parse("1.11" )

    @property
    def _A ( self : Dict ):
        # Dynamic-axis spec for the model inputs.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def _A ( self : Union[str, Any] ):
        # Absolute tolerance used when validating the exported model.
        return 1E-5

    @property
    def _A ( self : List[Any] ):
        # Minimum ONNX opset supported by this export.
        return 1_2
348
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils ) lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch'''] lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate''' lowerCamelCase_ : Tuple = '''default_config.yaml''' lowerCamelCase_ : str = config_folder / config_file lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml''' lowerCamelCase_ : Dict = Path('''tests/test_configs''' ) @classmethod def lowerCamelCase (cls ) -> Dict: '''simple docstring''' if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCamelCase (cls ) -> Any: '''simple docstring''' if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Dict = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=__magic_name__ ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : List[str] = '''test-tpu''' lowerCamelCase_ : Dict = '''us-central1-a''' lowerCamelCase_ : Any = '''ls''' 
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config'''] lowerCamelCase_ : Tuple = '''cd /usr/share''' lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh''' lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh''' def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : int = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Any = run_command( self.cmd 
+ [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo "Hello World"''', '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : str = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Tuple = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Any = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', 
'''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
60
0
"""Zero-shot image classification pipeline (CLIP-style image/text matching)."""
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

_a = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __A(Pipeline):
    """Score an image against free-form candidate labels.

    Fixes over the previous version: the four pipeline hooks were all named
    ``__A`` (shadowing each other and never reached by the ``Pipeline`` base,
    which dispatches to ``_sanitize_parameters``/``preprocess``/``_forward``/
    ``postprocess``), ``preprocess`` declared the same parameter name three
    times (a SyntaxError), and the sort key lambda referenced an undefined
    name.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        # Pick the model mapping matching the active framework.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """Classify ``images`` against ``candidate_labels`` passed as a kwarg."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # All caller-supplied options are consumed by the preprocess step;
        # forward and postprocess take no parameters.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """Build joint image + templated-text model inputs for one image."""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # Each label is expanded through the hypothesis template before tokenization.
        sequences = [hypothesis_template.format(label) for label in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Convert per-image logits into a score-sorted list of label dicts."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                # A 0-d tensor tolist() yields a scalar; normalize to a list.
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Sort labels by descending score (previously the lambda referenced an
        # undefined name and raised NameError at runtime).
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda pair: -pair[0])
        ]
        return result
213
import warnings

from ..trainer import Trainer
from ..utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)


class __lowerCAmelCase(Trainer):
    """Deprecated alias for :class:`Trainer`, kept only for backward compatibility.

    Previously this ``__init__`` declared the same parameter name twice (a
    SyntaxError) and passed the ``args`` parameter to ``warnings.warn`` as the
    warning *category*; the deprecation message itself says callers should use
    ``Trainer`` directly, so the base class is ``Trainer`` (the only trainer
    imported by this module).
    """

    def __init__(self, args=None, **kwargs):
        # Warn once per call site that this subclass adds no behavior.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
60
0
import os
import sys
import unittest


# Repo root is three directory levels above this test file; the repo-level
# ``utils`` directory holds the check_dummies script under test.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the original assigned this to a loose variable; confirm whether
# check_dummies reads it via a module attribute that should be set instead.
TRANSFORMERS_PATH = os.path.join(git_repo_path, "src", "transformers")

# Templates mirroring the snippets emitted by ``create_dummy_object``.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class __lowerCAmelCase(unittest.TestCase):
    """Tests for the ``check_dummies`` repo utility.

    The previous version bound every result to ``__lowerCamelCase`` while the
    assertions read ``__UpperCAmelCase``/``objects``/``dummy_files`` (NameError
    at runtime), and all four tests shared one method name, so only the last
    survived and none were collected by unittest (no ``test_`` prefix).
    """

    def test_find_backend(self):
        # Lines that are not backend guards yield None.
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(backend, "tokenizers")

        backend = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend, "tensorflow_text")

        # Compound guards are joined with "_and_".
        backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(backend, "sentencepiece_and_tokenizers")

        backend = find_backend("    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(backend, "sentencepiece_and_tensorflow_text")

        backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
175
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lza, require_zstandard


# NOTE(review): the previous version named every function ``lowerCamelCase_``
# (later defs clobbered earlier ones) and bound results to ``snake_case_``
# while using the real names (``fs``, ``member_file_path``, ...), which raised
# NameError. Fixture parameter names below are reconstructed from usage —
# confirm them against conftest.py before merging.


def test_mockfs(mockfs):
    """Both the mock protocol and the stdlib bz2 protocol are registered."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    """Without the fixture, the mock protocol must not leak into the registry."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    # Remote URIs lose their scheme; local paths pass through unchanged.
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    """Each compression filesystem exposes the decompressed member transparently."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional backend missing: reuse the skip reason from the marker.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    # The exposed member is the input filename with the compression suffix dropped.
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained URLs (``zip://member::archive``) resolve members as files."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    """Re-registering an existing protocol warns exactly once on reload."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
60
0
"""Lazy-import package init for the SpeechT5 model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_snake_case = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

# The tokenizer needs sentencepiece; register it only when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: previously the whole dict was rebound to a bare list here,
    # discarding every entry registered above.
    _snake_case['tokenization_speecht5'] = ['SpeechT5Tokenizer']

# Model classes need torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers. Module and class names now match the
    # lazy table above (they previously pointed at non-existent
    # ``*_speechta`` modules and ``SpeechTa*`` classes).
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy. Previously the proxy was
    # assigned to a throwaway variable and the call referenced
    # ``_import_structure``, which is undefined in this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _snake_case, module_spec=__spec__)
245
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Keep the historical alias alongside a usable name: the class body calls
# ``logger.info`` but the logger was previously bound only to ``lowerCAmelCase_``.
lowerCAmelCase_ = logger = logging.get_logger(__name__)


class __lowerCAmelCase(PretrainedConfig):
    """Composite configuration wrapping one encoder and one decoder config.

    Must be initialized with ``encoder=...`` and ``decoder=...`` keyword
    arguments, each a dict containing a ``model_type`` key. Fixes over the
    previous version: the two class attributes and the two methods each shared
    a single mangled name (so ``model_type``/``is_composition`` and the
    ``to_dict`` override required by :class:`PretrainedConfig` were lost), and
    ``__init__``/the classmethod declared duplicate parameter names — a
    SyntaxError.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): assert-based validation is stripped under ``python -O``;
        # kept for interface compatibility (callers may catch AssertionError).
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Build a composite config from two sub-configs, marking the decoder as such."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
60
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , A_ : Optional[Any] , A_ : Optional[Any]=7 , A_ : Optional[int]=3 , A_ : Optional[Any]=18 , A_ : Optional[int]=30 , A_ : Optional[Any]=4_00 , A_ : List[Any]=True , A_ : List[str]=None , A_ : List[str]=True , A_ : Optional[int]=None , A_ : Dict=True , A_ : List[str]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , A_ : Any=[0.26_862_954, 0.26_130_258, 0.27_577_711] , A_ : List[Any]=True , )-> Optional[Any]: __UpperCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = num_channels __UpperCamelCase = image_size __UpperCamelCase = min_resolution __UpperCamelCase = max_resolution __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = do_center_crop __UpperCamelCase = crop_size __UpperCamelCase = do_normalize __UpperCamelCase = image_mean __UpperCamelCase = image_std __UpperCamelCase = do_convert_rgb def A ( self : str )-> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def A ( self : List[str] , A_ : Dict=False , A_ : int=False , A_ : Tuple=False )-> Optional[int]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors 
at the same time" if equal_resolution: __UpperCamelCase = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __UpperCamelCase = [] for i in range(self.batch_size ): __UpperCamelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __UpperCamelCase = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] if torchify: __UpperCamelCase = [torch.from_numpy(A_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class __UpperCAmelCase ( _a , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def A ( self : Any )-> Optional[int]: __UpperCamelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=A_ ) @property def A ( self : List[str] )-> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : Dict )-> str: __UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "size" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "center_crop" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_convert_rgb" ) ) def A ( self : Optional[int] )-> Optional[Any]: __UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 2_24, "width": 2_24} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) __UpperCamelCase = 
self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def A ( self : Optional[int] )-> Any: pass def A ( self : Tuple )-> Union[str, Any]: __UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input __UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def A ( self : Tuple )-> List[str]: __UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input __UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def A ( self : Any )-> Union[str, Any]: __UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input __UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class __UpperCAmelCase ( _a , unittest.TestCase ): """simple docstring""" _snake_case : List[str] = ChineseCLIPImageProcessor if is_vision_available() else None def A ( self : int )-> int: __UpperCamelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=A_ ) __UpperCamelCase = 3 @property def A ( self : Dict )-> Dict: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] )-> Optional[int]: __UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "size" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "center_crop" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , 
"do_convert_rgb" ) ) def A ( self : Optional[int] )-> Union[str, Any]: pass def A ( self : Union[str, Any] )-> str: __UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input __UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
505
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[int] = question_encoder snake_case_ : Optional[int] = generator snake_case_ : Optional[Any] = self.question_encoder def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' ) snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any: '''simple docstring''' from ..auto.tokenization_auto import AutoTokenizer snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ ) if config is None: snake_case_ : int = RagConfig.from_pretrained(__magic_name__ ) snake_case_ : Dict = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) snake_case_ : Dict = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple: '''simple docstring''' return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' return self.generator.batch_decode(*__magic_name__ , **__magic_name__ 
) def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int: '''simple docstring''' return self.generator.decode(*__magic_name__ , **__magic_name__ ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Any = self.question_encoder def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.generator def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding: '''simple docstring''' warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __magic_name__ , ) if max_length is None: snake_case_ : Dict = self.current_tokenizer.model_max_length snake_case_ : List[str] = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: snake_case_ : Optional[int] = self.current_tokenizer.model_max_length snake_case_ : Union[str, Any] = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) snake_case_ : str = labels['''input_ids'''] return model_inputs
60
0
from torch import nn def _a ( UpperCamelCase_ : List[str] ) -> int: """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"Unsupported activation function: {act_fn}" )
339
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]: '''simple docstring''' snake_case_ : List[str] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : List[Any] = image_size snake_case_ : Optional[int] = patch_size snake_case_ : Optional[Any] = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : List[Any] = use_labels snake_case_ : Optional[int] = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : Any = hidden_act snake_case_ : List[str] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = type_sequence_label_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) 
snake_case_ : Any = (image_size // patch_size) ** 2 snake_case_ : int = num_patches + 1 def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : List[Any] = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : int = self.get_config() return config, pixel_values, labels def lowerCamelCase (self ) -> Tuple: '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' snake_case_ : int = ViTMSNModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : List[str] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' snake_case_ : int = self.type_sequence_label_size snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ ) print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' ) print('''Labels: {labels}''' ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ : Optional[int] = 1 snake_case_ : 
List[str] = ViTMSNForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Any = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Any = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _a, _a, unittest.TestCase ): lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () lowerCamelCase_ : Optional[int] = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ : int = False lowerCamelCase_ : Optional[int] = False lowerCamelCase_ : int = False lowerCamelCase_ : Optional[int] = False def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : List[Any] = ViTMSNModelTester(self ) snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMSN does not use inputs_embeds''' ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Any = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or 
isinstance(__magic_name__ , nn.Linear ) ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(__magic_name__ ) snake_case_ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[int] = [*signature.parameters.keys()] snake_case_ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def lowerCamelCase (self ) -> Any: '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase_ ( ) -> Optional[Any]: """simple docstring""" snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None @slow def lowerCamelCase (self ) -> Any: '''simple docstring''' torch.manual_seed(2 ) snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ ) snake_case_ : str = self.default_image_processor snake_case_ : str = prepare_img() snake_case_ : int = 
image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(**__magic_name__ ) # verify the logits snake_case_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
60
0
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) _lowerCamelCase : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=_a ) class lowerCAmelCase__ : '''simple docstring''' lowercase_ = 42 lowercase_ = 42 lowercase_ = None lowercase_ = None lowercase_ = None @dataclass(frozen=_a ) class lowerCAmelCase__ : '''simple docstring''' lowercase_ = 42 lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None if is_torch_available(): import torch from torch.utils.data import Dataset class lowerCAmelCase__ ( _a ): '''simple docstring''' lowercase_ = 42 def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__=False , lowercase__ = False , ): '''simple docstring''' __A =hans_processors[task]() __A =os.path.join( lowercase__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(lowercase__ ) , lowercase__ , ) , ) __A =processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __A =label_list[2], label_list[1] __A =label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__A =cached_features_file + '''.lock''' with FileLock(lowercase__ ): if os.path.exists(lowercase__ ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) __A =torch.load(lowercase__ ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) __A =( processor.get_dev_examples(lowercase__ ) if evaluate else processor.get_train_examples(lowercase__ ) ) logger.info('''Training examples: %s''' , len(lowercase__ ) ) __A =hans_convert_examples_to_features(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) logger.info('''Saving features into cached file %s''' , lowercase__ ) torch.save(self.features , lowercase__ ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , lowercase__ ): '''simple docstring''' return self.features[i] def __UpperCamelCase ( self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class lowerCAmelCase__ : '''simple docstring''' lowercase_ = 42 def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_2_8 , lowercase__=False , lowercase__ = False , ): '''simple docstring''' __A =hans_processors[task]() __A =processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __A =label_list[2], label_list[1] __A =label_list __A =processor.get_dev_examples(lowercase__ ) if evaluate else processor.get_train_examples(lowercase__ ) __A =hans_convert_examples_to_features(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(lowercase__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, 
"token_type_ids": ex.token_type_ids, }, ex.label, ) __A =tf.data.Dataset.from_generator( lowercase__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def __UpperCamelCase ( self ): '''simple docstring''' return self.dataset def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , lowercase__ ): '''simple docstring''' return self.features[i] def __UpperCamelCase ( self ): '''simple docstring''' return self.label_list class lowerCAmelCase__ ( _a ): '''simple docstring''' def __UpperCamelCase ( self , lowercase__ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(lowercase__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def __UpperCamelCase ( self , lowercase__ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(lowercase__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def __UpperCamelCase ( self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def __UpperCamelCase ( self , lowercase__ , lowercase__ ): '''simple docstring''' __A =[] for i, line in enumerate(lowercase__ ): if i == 0: continue __A ='''%s-%s''' % (set_type, line[0]) __A =line[5] __A =line[6] __A =line[7][2:] if line[7].startswith('''ex''' ) else line[7] __A =line[0] examples.append(InputExample(guid=lowercase__ , text_a=lowercase__ , text_b=lowercase__ , label=lowercase__ , pairID=lowercase__ ) ) return examples def A__ ( __A : Any , __A : Union[str, Any] , __A : Optional[int] , __A : Tuple , ) ->Any: __A ={label: i for i, label in enumerate(_UpperCamelCase )} __A =[] for ex_index, example in tqdm.tqdm(enumerate(_UpperCamelCase ) , desc='''convert 
examples to features''' ): if ex_index % 1_00_00 == 0: logger.info('''Writing example %d''' % (ex_index) ) __A =tokenizer( example.text_a , example.text_b , add_special_tokens=_UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' , truncation=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , ) __A =label_map[example.label] if example.label in label_map else 0 __A =int(example.pairID ) features.append(InputFeatures(**_UpperCamelCase , label=_UpperCamelCase , pairID=_UpperCamelCase ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(F'''guid: {example}''' ) logger.info(F'''features: {features[i]}''' ) return features _lowerCamelCase : List[Any] = { '''hans''': 3, } _lowerCamelCase : Dict = { '''hans''': HansProcessor, }
184
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class __lowerCAmelCase ( _a ): lowerCamelCase_ : List[Any] = '''efficientnet''' def __init__(self , __magic_name__ = 3 , __magic_name__ = 600 , __magic_name__ = 2.0 , __magic_name__ = 3.1 , __magic_name__ = 8 , __magic_name__ = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ = [] , __magic_name__ = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ = 0.25 , __magic_name__ = "swish" , __magic_name__ = 2560 , __magic_name__ = "mean" , __magic_name__ = 0.02 , __magic_name__ = 0.001 , __magic_name__ = 0.99 , __magic_name__ = 0.5 , __magic_name__ = 0.2 , **__magic_name__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(**__magic_name__ ) snake_case_ : List[str] = num_channels snake_case_ : Tuple = image_size snake_case_ : Union[str, Any] = width_coefficient snake_case_ : Tuple = depth_coefficient snake_case_ : Optional[Any] = depth_divisor snake_case_ : Optional[int] = kernel_sizes snake_case_ : str = in_channels snake_case_ : Optional[Any] = out_channels snake_case_ : int = depthwise_padding snake_case_ : Optional[Any] = strides snake_case_ : Any = num_block_repeats snake_case_ : Optional[Any] = expand_ratios snake_case_ : Union[str, Any] = squeeze_expansion_ratio snake_case_ : Union[str, Any] = hidden_act snake_case_ : Union[str, Any] = hidden_dim snake_case_ : Any = pooling_type snake_case_ : List[str] = initializer_range snake_case_ : str = batch_norm_eps snake_case_ : Optional[int] = batch_norm_momentum 
snake_case_ : Optional[Any] = dropout_rate snake_case_ : List[str] = drop_connect_rate snake_case_ : Union[str, Any] = sum(__magic_name__ ) * 4 class __lowerCAmelCase ( _a ): lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' ) @property def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase (self ) -> float: '''simple docstring''' return 1e-5
60
0
"""simple docstring""" import heapq import sys import numpy as np a : Optional[int] = tuple[int, int] class __UpperCAmelCase: """simple docstring""" def __init__( self ): '''simple docstring''' lowercase__ : int= [] lowercase__ : Dict= set() def UpperCAmelCase_ ( self ): '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float("inf" ) def UpperCAmelCase_ ( self ): '''simple docstring''' return len(self.elements ) == 0 def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(snake_case__ ) else: # update # print("update", item) lowercase__ : Dict= [] (lowercase__) : List[Any]= heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) (lowercase__) : List[str]= heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if item in self.set: self.set.remove(snake_case__ ) lowercase__ : List[str]= [] (lowercase__) : List[str]= heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) (lowercase__) : Tuple= heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return self.elements[0][1] def UpperCAmelCase_ ( self ): '''simple docstring''' (lowercase__) : Any= heapq.heappop(self.elements ) self.set.remove(snake_case__ ) return (priority, item) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : Union[str, Any]= np.array(_UpperCamelCase ) lowercase__ : int= np.array(_UpperCamelCase ) return np.linalg.norm(a - b ) def lowercase__(A , A ) ->Dict: """simple docstring""" return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t def lowercase__(A , A ) ->List[Any]: """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def 
lowercase__(A , A , A , A ) ->int: """simple docstring""" lowercase__ : Optional[int]= g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase ) return ans def lowercase__(A , A , A ) ->Any: """simple docstring""" lowercase__ : List[Any]= np.chararray((n, n) ) for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): lowercase__ : Any= '''*''' for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): if (j, (n - 1) - i) in blocks: lowercase__ : Dict= '''#''' lowercase__ : List[str]= '''-''' lowercase__ : Dict= back_pointer[goal] while x != start: (lowercase__) : Dict= x # print(x) lowercase__ : Dict= '''-''' lowercase__ : str= back_pointer[x] lowercase__ : Union[str, Any]= '''-''' for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=" " ) print("<-- End position" , end=" " ) else: print(grid[i][j] , end=" " ) print() print("^" ) print("Start position" ) print() print("# is an obstacle" ) print("- is the path taken by algorithm" ) print("PATH TAKEN BY THE ALGORITHM IS:-" ) lowercase__ : Optional[int]= back_pointer[goal] while x != start: print(_UpperCamelCase , end=" " ) lowercase__ : Tuple= back_pointer[x] print(_UpperCamelCase ) sys.exit() def lowercase__(A ) ->Union[str, Any]: """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowercase__(A , A , A , A , A , A , A , A , ) ->Any: """simple docstring""" for itera in range(_UpperCamelCase ): open_list[itera].remove_element(_UpperCamelCase ) # print("s", s) # print("j", j) (lowercase__) : str= s lowercase__ : Dict= (x - 1, y) lowercase__ : str= (x + 1, y) lowercase__ : List[str]= (x, y + 1) lowercase__ : Optional[Any]= (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(_UpperCamelCase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(_UpperCamelCase ) lowercase__ : int= -1 
lowercase__ : str= float("inf" ) if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1: lowercase__ : List[Any]= g_function[s] + 1 lowercase__ : Optional[int]= s if neighbours not in close_list_anchor: open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) ) if neighbours not in close_list_inad: for var in range(1 , _UpperCamelCase ): if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key( _UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ): open_list[j].put( _UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ) def lowercase__() ->Dict: """simple docstring""" lowercase__ : List[Any]= [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list a : Union[str, Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} a : Dict = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] a : Union[str, Any] = make_common_ground() a : Dict = blocks_blk # hyper parameters a : str = 1 a : Tuple = 1 a : List[Any] = 20 a : Tuple = 3 # one consistent and two other inconsistent # start and end destination a : List[str] = (0, 0) a : Tuple = (n - 1, n - 1) a : str = 1 def lowercase__(A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : Optional[Any]= {start: 0, goal: float("inf" )} lowercase__ : int= {start: -1, goal: -1} lowercase__ : Any= [] lowercase__ : Tuple= set() for i in range(_UpperCamelCase ): open_list.append(PriorityQueue() ) open_list[i].put(_UpperCamelCase , 
key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ) lowercase__ : list[int]= [] lowercase__ : list[int]= [] while open_list[0].minkey() < float("inf" ): for i in range(1 , _UpperCamelCase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("inf" ): do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: lowercase__ : Dict= open_list[i].top_show() visited.add(_UpperCamelCase ) expand_state( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) close_list_inad.append(_UpperCamelCase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("inf" ): do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: lowercase__ : List[Any]= open_list[0].top_show() visited.add(_UpperCamelCase ) expand_state( _UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) close_list_anchor.append(_UpperCamelCase ) print("No path found to goal" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(_UpperCamelCase ): if (j, i) in blocks: print("#" , end=" " ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("*" , end=" " ) else: print("-" , end=" " ) else: print("*" , end=" " ) if (j, i) == (n - 1, n - 1): print("<-- End position" , end=" " ) print() print("^" ) print("Start position" ) print() print("# is an obstacle" ) print("- is the path taken by algorithm" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
218
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowerCAmelCase_ = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int) lowerCAmelCase_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowerCAmelCase_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowerCAmelCase_ = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase_ = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
0
from scipy.stats import pearsonr

import datasets


# Module-level docstring constants restored: the mangled version bound these to
# `__lowerCamelCase` while the class decorator reads `_DESCRIPTION` /
# `_KWARGS_DESCRIPTION` / `_CITATION`, so the module failed at import time.
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, Ilhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Antonio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Metric wrapper around `scipy.stats.pearsonr`."""

    def _info(self):
        # Declares the metric metadata consumed by the `datasets` framework.
        # Bug fix: the mangled version named both this method and `_compute`
        # `_lowercase`, so the second definition silently shadowed the first
        # and the Metric contract (`_info` / `_compute`) was never satisfied.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Pearson correlation coefficient (and optionally its p-value).

        Returns:
            dict with key "pearsonr" (float), and additionally "p-value" when
            `return_pvalue` is True.
        """
        if return_pvalue:
            # Bug fix: scipy returns a (statistic, pvalue) pair; the mangled
            # version assigned it to a throwaway name and then indexed an
            # undefined `results`.
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
297
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    Pins the pad/eos/unk special tokens to ids 0/1/2 and, after training,
    patches the Unigram model JSON so `<unk>` resolves to the reserved id.
    (Restored from a mangled version whose signatures used duplicate parameter
    names — a SyntaxError — and whose three methods shared one name.)
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        # Special-token ids are fixed so trained vocabularies stay aligned.
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list: List = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                # Collapse runs of 2+ spaces into one.
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        # Append EOS to every single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from one file path or a list of file paths."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained model so the unk token maps to its reserved id.

        Bug fix: the mangled version assigned the unk id to a throwaway local
        instead of `tokenizer_json["model"]["unk_id"]`, and then passed an
        undefined name to `Tokenizer.from_str`.
        """
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
60
0
from __future__ import annotations from random import random from typing import Generic, TypeVar _UpperCamelCase : List[Any] =TypeVar('KT') _UpperCamelCase : List[str] =TypeVar('VT') class UpperCAmelCase__ ( Generic[KT, VT] ): def __init__( self ,A__ = "root" ,A__ = None ): _A : Union[str, Any] = key _A : Dict = value _A : list[Node[KT, VT]] = [] def __repr__( self ): return f"""Node({self.key}: {self.value})""" @property def A__ ( self ): return len(self.forward ) class UpperCAmelCase__ ( Generic[KT, VT] ): def __init__( self ,A__ = 0.5 ,A__ = 16 ): _A : Node[KT, VT] = Node[KT, VT]() _A : Dict = 0 _A : Any = p _A : Optional[Any] = max_level def __str__( self ): _A : Dict = list(self ) if len(A__ ) == 0: return f"""SkipList(level={self.level})""" _A : int = max((len(str(A__ ) ) for item in items) ,default=4 ) _A : Optional[Any] = max(A__ ,4 ) + 4 _A : Union[str, Any] = self.head _A : Any = [] _A : int = node.forward.copy() lines.append(f"""[{node.key}]""".ljust(A__ ,'''-''' ) + '''* ''' * len(A__ ) ) lines.append(''' ''' * label_size + '''| ''' * len(A__ ) ) while len(node.forward ) != 0: _A : List[str] = node.forward[0] lines.append( f"""[{node.key}]""".ljust(A__ ,'''-''' ) + ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) ) lines.append(''' ''' * label_size + '''| ''' * len(A__ ) ) _A : List[Any] = node.forward lines.append('''None'''.ljust(A__ ) + '''* ''' * len(A__ ) ) return f"""SkipList(level={self.level})\n""" + "\n".join(A__ ) def __iter__( self ): _A : int = self.head while len(node.forward ) != 0: yield node.forward[0].key _A : List[str] = node.forward[0] def A__ ( self ): _A : Optional[int] = 1 while random() < self.p and level < self.max_level: level += 1 return level def A__ ( self ,A__ ): _A : List[Any] = [] _A : Union[str, Any] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. 
# node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: _A : List[str] = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(A__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def A__ ( self ,A__ ): _A : Union[str, Any] = self._locate_node(A__ ) if node is not None: for i, update_node in enumerate(A__ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: _A : List[Any] = node.forward[i] else: _A : Any = update_node.forward[:i] def A__ ( self ,A__ ,A__ ): _A : int = self._locate_node(A__ ) if node is not None: _A : Optional[int] = value else: _A : List[str] = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 ,A__ ): update_vector.append(self.head ) _A : List[str] = level _A : Any = Node(A__ ,A__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. 
if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(A__ ) else: _A : Tuple = new_node def A__ ( self ,A__ ): _A : int = self._locate_node(A__ ) if node is not None: return node.value return None def a__ () -> List[str]: _A : int = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) _A : Optional[Any] = skip_list.head _A : Optional[int] = {} while node.level != 0: _A : Optional[Any] = node.forward[0] _A : Union[str, Any] = node.value assert len(_UpperCamelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def a__ () -> Optional[Any]: _A : List[Any] = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) _A : str = skip_list.head _A : Tuple = {} while node.level != 0: _A : List[str] = node.forward[0] _A : Union[str, Any] = node.value if len(_UpperCamelCase ) != 4: print() assert len(_UpperCamelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def a__ () -> int: _A : Optional[Any] = SkipList() assert skip_list.find('''Some key''' ) is None def a__ () -> List[str]: _A : List[str] = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def a__ () -> Optional[Any]: _A : List[Any] = SkipList() 
skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def a__ () -> int: _A : Union[str, Any] = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def a__ () -> Union[str, Any]: _A : Optional[int] = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def a__ () -> List[Any]: _A : int = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(__lowercase :List[Any] ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCamelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def a__ () -> List[str]: def is_sorted(__lowercase :Union[str, Any] ): return all(next_item >= item for item, next_item in zip(_UpperCamelCase , lst[1:] ) ) _A : str = SkipList() for i in range(10 ): skip_list.insert(_UpperCamelCase , 
_UpperCamelCase ) assert is_sorted(list(_UpperCamelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCamelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCamelCase ) ) def a__ () -> Any: for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def a__ () -> List[Any]: _A : Optional[Any] = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
206
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : List[Any] = [False] * len(_UpperCamelCase ) snake_case_ : int = [-1] * len(_UpperCamelCase ) def dfs(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Dict = True snake_case_ : Dict = c for u in graph[v]: if not visited[u]: dfs(_UpperCamelCase , 1 - c ) for i in range(len(_UpperCamelCase ) ): if not visited[i]: dfs(_UpperCamelCase , 0 ) for i in range(len(_UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
60
0
"""Check (and optionally regenerate) the backend-specific dummy object files."""
import argparse
import os
import re

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Return the backend name(s) mentioned in `line`, joined by `_and_`.

    Returns None when the line contains no `is_xxx_available()` call.
    """
    # Bug fix throughout this module: the mangled version named every function
    # `A__` (each definition shadowing the last) while call sites used the
    # canonical names, and used duplicate parameter names (a SyntaxError).
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Parse the main `__init__.py` and collect objects grouped by required backend."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Render the dummy stub for `name`: constant if UPPER, function if lower, else class."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Build the full text of each dummy module, keyed by backend."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        # Bug fix: the mangled version passed the wrong variables to
        # create_dummy_object (ignoring the loop variable `o`) and never stored
        # the rendered file under its backend key.
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Compare generated dummy files with the checked-in ones.

    Raises ValueError on mismatch unless `overwrite` is True, in which case the
    on-disk files are rewritten.
    """
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
589
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int: '''simple docstring''' snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20} snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} snake_case_ : str = parent snake_case_ : Optional[int] = batch_size snake_case_ : Dict = num_channels snake_case_ : List[Any] = image_size snake_case_ : Union[str, Any] = min_resolution snake_case_ : Tuple = max_resolution snake_case_ : str = do_resize snake_case_ : Tuple = size snake_case_ : int = do_center_crop snake_case_ : Tuple = crop_size snake_case_ : int = do_normalize snake_case_ : Optional[Any] = image_mean snake_case_ : List[str] = image_std snake_case_ : str = do_reduce_labels def lowerCamelCase (self ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , 
split='''test''' ) snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] ) snake_case_ : str = Image.open(dataset[1]['''file'''] ) return image, map def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] ) snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] ) snake_case_ : List[str] = Image.open(ds[2]['''file'''] ) snake_case_ : str = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = BeitImageProcessingTester(self ) @property def lowerCamelCase (self ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''size''' ) ) self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) ) self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) ) self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) snake_case_ : Union[str, Any] = 
self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' pass def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) snake_case_ : Union[str, Any] = [] for image in image_inputs: 
self.assertIsInstance(__magic_name__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test not batched input (PIL images) snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs() snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( 
encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched input (PIL images) snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs() snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs() snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 150 ) snake_case_ : List[Any] = True snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 )
60
0
from ..utils import DummyObject, requires_backends


class __magic_name__(metaclass=DummyObject):
    """Placeholder that raises an informative error when the ``torch``
    backend is required but not installed.

    NOTE(review): the original module repeated dozens of classes all named
    ``__magic_name__`` (and module-level functions all named
    ``UpperCAmelCase__``).  Because every definition shadowed the previous
    one, only the final binding of each name survived at import time.  It
    also referenced two undefined names — ``_a`` as the metaclass (the
    imported ``DummyObject`` was never used) and ``_UpperCamelCase`` inside
    the functions — so the module could not even execute.  A single working
    definition per surviving name is kept here, preserving the module's
    observable top-level bindings (``__magic_name__`` and
    ``UpperCAmelCase__``).
    """

    # Backend(s) this dummy stands in for; read by ``requires_backends``.
    _lowerCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        # Fail fast on instantiation with a clear "install torch" message.
        requires_backends(self, ["torch"])

    @classmethod
    def _A(cls, *args, **kwargs):
        # The original declared two identical ``_A`` classmethods (the
        # second shadowed the first); one definition is equivalent.
        requires_backends(cls, ["torch"])


def UpperCAmelCase__(*args, **kwargs):
    """Dummy module-level function: raises when called without ``torch``."""
    requires_backends(UpperCAmelCase__, ["torch"])
348
from sklearn.metrics import mean_squared_error

import datasets


# Fix(review): the original bound all three of these strings to the same
# name ``lowerCAmelCase_`` (each assignment shadowing the previous), while
# the decorator below referenced the undefined names ``_DESCRIPTION`` and
# ``_KWARGS_DESCRIPTION`` — a guaranteed NameError at import.
_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """``mse`` metric: thin wrapper around ``sklearn.metrics.mean_squared_error``.

    Fix(review): the original defined all three methods under the single
    name ``lowerCamelCase`` (so each shadowed the previous one), even though
    the ``datasets.Metric`` base class dispatches to ``_info`` and
    ``_compute`` and the body itself called the then-undefined
    ``self._get_feature_types``.  The compute method additionally declared
    the parameter name ``__magic_name__`` twice — a SyntaxError.
    """

    def _info(self):
        # Called by ``datasets.Metric`` to describe inputs/outputs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config takes one sequence of floats per example
        # (multi-output regression); the default config takes scalars.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Return ``{"mse": ...}`` (or RMSE when ``squared=False``)."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
60
0
"""simple docstring""" from math import factorial _a : Dict = {str(d): factorial(d) for d in range(10)} def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> int: return sum(DIGIT_FACTORIAL[d] for d in str(_UpperCamelCase ) ) def SCREAMING_SNAKE_CASE ( ) -> int: _lowerCAmelCase : Optional[int] = 7 * factorial(9 ) + 1 return sum(i for i in range(3 ,_UpperCamelCase ) if sum_of_digit_factorial(_UpperCamelCase ) == i ) if __name__ == "__main__": print(F"""{solution() = }""")
213
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class __lowerCAmelCase:
    """Mixin of shared (de)serialization tests for feature extractors.

    Consuming classes are expected to be ``unittest.TestCase`` subclasses
    providing ``feature_extraction_class`` and ``feat_extract_dict``
    attributes — TODO confirm against the concrete test classes.

    Fix(review): the original named all four methods ``lowerCamelCase``
    (each shadowed the previous, and none was ``test_``-prefixed, so the
    test runner would never collect them) and referenced the undefined
    placeholder ``__magic_name__`` where real locals belong.  The ``: Any``
    annotation on the class attribute also used an unimported name.
    """

    # Concrete feature-extractor class under test; set by subclasses.
    lowerCamelCase_ = None

    def test_feat_extract_to_json_string(self):
        """Every init kwarg must round-trip through ``to_json_string``."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """``to_json_file`` / ``from_json_file`` must round-trip the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
            self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """``save_pretrained`` / ``from_pretrained`` must round-trip the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            # ``save_pretrained`` returns the list of files written; the
            # first entry is the serialized config JSON.
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
            self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The extractor must be constructible with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
60
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for the DETA (deformable-DETR-style) model.

    NOTE(review): parameter and attribute names were reconstructed from the
    references inside the class (``attribute_map``, ``logger``, ``to_dict``);
    the scrambled original used one duplicated parameter name throughout,
    which is a SyntaxError. Verify defaults against the upstream DETA config.
    """

    model_type = "deta"
    # Map generic config attribute names onto DETA-specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        # Fall back to a default ResNet backbone when none is given; accept a
        # plain dict and rebuild the proper config class from its model_type.
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Generic alias for the encoder attention-head count."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Generic alias for the model dimension."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
175
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Common interface for the supported hyperparameter-search backends.

    NOTE(review): class and method names were reconstructed — the scrambled
    original duplicated one generated name for every parameter (SyntaxError)
    and for every class, and the registry below references these names.
    """

    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Launch the hyperparameter search for ``trainer``."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default search space for ``trial``."""
        raise NotImplementedError

    def ensure_available(self):
        # Fail early with an actionable install hint instead of a late ImportError.
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


# Registry of every backend, keyed by its HPSearchBackend enum member.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    """Return the name of the first installed backend, or raise with install hints."""
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
60
0
"""Lazy import structure for the NLLB tokenizer package."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# NOTE(review): the scrambled original rebound one name repeatedly and then
# passed an undefined ``_import_structure`` to _LazyModule; restored to the
# standard transformers lazy-module layout.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
245
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list: """simple docstring""" snake_case_ : Tuple = len(_UpperCamelCase ) snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): snake_case_ : Any = y_points[i] for i in range(2 , _UpperCamelCase ): for j in range(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Optional[int] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
60
0
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase (_snake_case ) -> List[str]: '''simple docstring''' for param in module.parameters(): __UpperCamelCase = False def lowercase () -> Dict: '''simple docstring''' __UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): __UpperCamelCase = '''mps''' if device == "mps": print( "WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues" " with generations." ) return device def lowercase (_snake_case ) -> str: '''simple docstring''' __UpperCamelCase = plt.imshow(_UpperCamelCase ) fig.axes.get_xaxis().set_visible(_UpperCamelCase ) fig.axes.get_yaxis().set_visible(_UpperCamelCase ) plt.show() def lowercase () -> Union[str, Any]: '''simple docstring''' __UpperCamelCase = datetime.now() __UpperCamelCase = current_time.strftime("%H:%M:%S" ) return timestamp
505
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy import structure for the X-MOD model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# NOTE(review): the scrambled original rebound one name for the structure
# dict, the modeling list, and the _LazyModule instance; restored to the
# standard transformers lazy-module layout.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
60
0
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Common interface for the supported hyperparameter-search backends.

    NOTE(review): names reconstructed — the scrambled copy used an undefined
    base class and duplicated parameter names (SyntaxError); the registry
    below pins the intended class names.
    """

    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Launch the hyperparameter search for ``trainer``."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default search space for ``trial``."""
        raise NotImplementedError

    def ensure_available(self):
        # Fail early with an actionable install hint instead of a late ImportError.
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


# Registry of every backend, keyed by its HPSearchBackend enum member.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    """Return the name of the first installed backend, or raise with install hints."""
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
339
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


# NOTE(review): the helper names below are pinned by the references inside the
# operation lists and the tests; the scrambled original defined all four under
# one colliding name.
def _get(k):
    """Operation tuple: read key ``k``."""
    return getitem, k


def _set(k, v):
    """Operation tuple: set key ``k`` to value ``v``."""
    return setitem, k, v


def _del(k):
    """Operation tuple: delete key ``k``."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)``; return (result, None) or (None, exception)."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Run every operation against HashMap and dict; both must agree exactly."""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_public_methods():
    """HashMap must not expose public names beyond what dict exposes."""

    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
60
0
"""Convert a BigBirdPegasus TensorFlow checkpoint to a PyTorch model.

NOTE(review): constant and function names reconstructed from the references
inside this script (``DECODER_PATTERNS``, ``KEYS_TO_IGNORE``, the __main__
call); the scrambled original duplicated parameter names (SyntaxError) and
never bound the names it used.
"""
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


# Substitutions applied to every key, in order (tf name -> hf name).
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# Bias tensors that have no counterpart in the HF model.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k: str, patterns) -> str:
    """Apply every (tf_name, hf_name) substitution in ``patterns`` to ``k``."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map the TF weight dict onto a freshly initialized HF model and load it."""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # Dense/attention kernels are stored transposed relative to torch.
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # Shared position embeddings feed both encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    """Load every non-ignored variable of the TF checkpoint into a dict of arrays."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update) -> None:
    """End-to-end conversion: read the TF checkpoint, convert, save as HF model."""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
184
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort ``my_list`` (numeric values) with bucket sort.

    One bucket per integer offset from the minimum value; each bucket is
    sorted individually and the buckets are concatenated in order.
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        # Offset from the minimum selects the bucket.
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
60
0
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __UpperCAmelCase( _a , unittest.TestCase ): """simple docstring""" __lowerCamelCase = BlenderbotSmallTokenizer __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' super().setUp() lowercase__ : Optional[int]= ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] lowercase__ : str= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Tuple= ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] lowercase__ : List[str]= {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} lowercase__ : Optional[int]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= '''adapt act apte''' lowercase__ : int= '''adapt act apte''' return input_text, output_text def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__ : List[Any]= '''adapt act apte''' lowercase__ : Dict= ['''adapt''', '''act''', '''ap@@''', '''te'''] lowercase__ : Any= 
tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowercase__ : Tuple= [tokenizer.bos_token] + tokens + [tokenizer.eos_token] lowercase__ : str= [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) assert tok("sam" ).input_ids == [1384] lowercase__ : int= '''I am a small frog.''' lowercase__ : Dict= tok([src_text] , padding=snake_case__ , truncation=snake_case__ )['''input_ids'''] lowercase__ : str= tok.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) lowercase__ : Tuple= '''I am a small frog .''' lowercase__ : Optional[Any]= '''.''' lowercase__ : int= tok(snake_case__ )['''input_ids'''] lowercase__ : Any= tok(snake_case__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
218
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax output layer (Transfo-XL style) with clustered tails.

    Fixed defects from the previous revision: the Keras hooks ``build`` and
    ``call`` had been renamed to a single shared method name (so Keras never
    invoked them), and collapsed tuple unpacks left ``l_idx``/``r_idx``/
    ``d_emb_i``/``weight``/``bias`` undefined.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        """Create cluster and per-cutoff projection/output weights."""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    # No projection needed when embedding and projection dims match.
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Each successive cluster uses a smaller embedding dimension.
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        """Project ``x`` (optionally through ``proj``) and compute logits against ``W``/``b``."""
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """Gather the log-probability of each target token from ``logprob``."""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            # Single softmax over the whole vocabulary.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(target)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
60
0
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs/inputs for the standalone-decoder tests.

    Fixed defect: this class had been renamed to collide with the test class
    below, while ``setUp`` still referenced ``TrOCRStandaloneDecoderModelTester``
    (a NameError at test time).
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # Not relevant for a standalone decoder; deliberately overridden as no-ops.
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
297
import requests


def send_slack_message(message_body: str, slack_url: str, timeout: float = 60) -> None:
    """Post ``message_body`` to a Slack incoming-webhook URL.

    Fixed defects: the function previously declared two parameters with the
    same name (a SyntaxError) and was defined under a name different from the
    one used at the call site below (a NameError).

    :param message_body: text to post to the channel.
    :param slack_url: webhook URL provided by Slack.
    :param timeout: seconds to wait for the HTTP response before
        ``requests`` raises — prevents an unbounded hang (new, defaulted,
        backward-compatible parameter).
    :raises ValueError: if Slack does not answer with HTTP 200.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=timeout)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
60
0
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """Tests for ``DDIMParallelScheduler``.

    Fixed defects from the previous revision: the base class was an undefined
    name, every method shared one name (so all but the last were shadowed and
    never discovered), and ``num_inference_steps, eta`` unpacks had been
    collapsed into single assignments, leaving later uses undefined.
    """

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a full 10-step denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
206
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> exported names. Optional
# backends (sentencepiece, speech, tf, torch) only register their entries
# when available. The previous revision rebound a single variable for every
# assignment, so `_import_structure` was undefined and all optional entries
# were lost.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
60
0
"""simple docstring""" import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __magic_name__ ( unittest.TestCase ): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ): """simple docstring""" _lowerCAmelCase = size if size is not None else {'''height''': 2_0, '''width''': 2_0} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = do_reduce_labels def _lowerCamelCase ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def A__ ( ): """simple docstring""" _lowerCAmelCase = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' ) _lowerCAmelCase = Image.open(dataset[0]['file'] ) _lowerCAmelCase = 
Image.open(dataset[1]['file'] ) return image, map def A__ ( ): """simple docstring""" _lowerCAmelCase = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' ) _lowerCAmelCase = Image.open(ds[0]['file'] ) _lowerCAmelCase = Image.open(ds[1]['file'] ) _lowerCAmelCase = Image.open(ds[2]['file'] ) _lowerCAmelCase = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __magic_name__ ( _a ,unittest.TestCase ): UpperCamelCase : List[Any] = BeitImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = BeitImageProcessingTester(self ) @property def _lowerCamelCase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , 'do_resize' ) ) self.assertTrue(hasattr(__magic_name__ , 'size' ) ) self.assertTrue(hasattr(__magic_name__ , 'do_center_crop' ) ) self.assertTrue(hasattr(__magic_name__ , 'center_crop' ) ) self.assertTrue(hasattr(__magic_name__ , 'do_normalize' ) ) self.assertTrue(hasattr(__magic_name__ , 'image_mean' ) ) self.assertTrue(hasattr(__magic_name__ , 'image_std' ) ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 2_0, 'width': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) _lowerCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=__magic_name__ ) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) 
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" pass def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase = image_processing(__magic_name__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase = image_processing(__magic_name__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase = image_processing(__magic_name__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) _lowerCAmelCase = [] for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 2_5_5 ) # Test batched _lowerCAmelCase = image_processing(__magic_name__ , __magic_name__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 2_5_5 ) # Test not batched input (PIL images) _lowerCAmelCase = prepare_semantic_single_inputs() _lowerCAmelCase = image_processing(__magic_name__ , __magic_name__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 2_5_5 ) # Test batched input (PIL images) _lowerCAmelCase = prepare_semantic_batch_inputs() _lowerCAmelCase = image_processing(__magic_name__ , __magic_name__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 2_5_5 ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 _lowerCAmelCase = prepare_semantic_single_inputs() _lowerCAmelCase = image_processing(__magic_name__ , __magic_name__ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 1_5_0 ) _lowerCAmelCase = True _lowerCAmelCase = image_processing(__magic_name__ , __magic_name__ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
589
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''owlvit_text_model''' def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str: '''simple docstring''' super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) snake_case_ : int = vocab_size snake_case_ : str = hidden_size snake_case_ : List[Any] = intermediate_size snake_case_ : str = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[Any] = max_position_embeddings snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = layer_norm_eps snake_case_ : Dict = attention_dropout snake_case_ : Union[str, Any] = initializer_range snake_case_ : int = initializer_factor @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__magic_name__ ) snake_case_ , snake_case_ : str = 
cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": snake_case_ : str = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : int = '''owlvit_vision_model''' def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int: '''simple docstring''' super().__init__(**__magic_name__ ) snake_case_ : Optional[Any] = hidden_size snake_case_ : Union[str, Any] = intermediate_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : List[Any] = num_channels snake_case_ : Union[str, Any] = image_size snake_case_ : Dict = patch_size snake_case_ : List[Any] = hidden_act snake_case_ : Tuple = layer_norm_eps snake_case_ : Dict = attention_dropout snake_case_ : List[str] = initializer_range snake_case_ : List[Any] = initializer_factor @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__magic_name__ ) snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": snake_case_ : str = config_dict['''vision_config'''] if "model_type" 
in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : int = '''owlvit''' lowerCamelCase_ : Optional[int] = True def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int: '''simple docstring''' super().__init__(**__magic_name__ ) if text_config is None: snake_case_ : Tuple = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: snake_case_ : str = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) snake_case_ : str = OwlViTTextConfig(**__magic_name__ ) snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ ) snake_case_ : Any = projection_dim snake_case_ : Union[str, Any] = logit_scale_init_value snake_case_ : str = return_dict snake_case_ : Any = 1.0 @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__magic_name__ ) snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) @classmethod def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str: '''simple docstring''' snake_case_ : Optional[int] = {} snake_case_ : Union[str, Any] = text_config snake_case_ : Optional[Any] = vision_config return cls.from_dict(__magic_name__ , **__magic_name__ ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : List[Any] = self.text_config.to_dict() snake_case_ : List[Any] = self.vision_config.to_dict() snake_case_ : Tuple = self.__class__.model_type return output class __lowerCAmelCase ( _a ): @property def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def lowerCamelCase (self ) -> float: '''simple docstring''' return 1e-4 def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]: '''simple docstring''' snake_case_ : Dict = super().generate_dummy_inputs( processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ ) snake_case_ : List[str] = super().generate_dummy_inputs( processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ ) return {**text_input_dict, **image_input_dict} @property def lowerCamelCase (self ) -> 
int: '''simple docstring''' return 14
60
0
# limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( 'pipelines_utils', '0.22.0', 'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.', standard_warn=False, stacklevel=3, )
348
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils ) lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch'''] lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate''' lowerCamelCase_ : Tuple = '''default_config.yaml''' lowerCamelCase_ : str = config_folder / config_file lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml''' lowerCamelCase_ : Dict = Path('''tests/test_configs''' ) @classmethod def lowerCamelCase (cls ) -> Dict: '''simple docstring''' if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCamelCase (cls ) -> Any: '''simple docstring''' if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Dict = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=__magic_name__ ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : List[str] = '''test-tpu''' lowerCamelCase_ : Dict = '''us-central1-a''' lowerCamelCase_ : Any = '''ls''' 
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config'''] lowerCamelCase_ : Tuple = '''cd /usr/share''' lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh''' lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh''' def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : int = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Any = run_command( self.cmd 
+ [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo "Hello World"''', '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : str = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Tuple = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Any = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', 
'''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
60
0
"""simple docstring""" import tensorflow as tf from ...tf_utils import shape_list class __A ( tf.keras.layers.Layer ): def __init__( self , a__ , a__ , a__ , a__ , a__=1 , a__=False , **a__ ): super().__init__(**a__ ) _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Dict = d_embed _lowerCAmelCase : Union[str, Any] = d_proj _lowerCAmelCase : str = cutoffs + [vocab_size] _lowerCAmelCase : int = [0] + self.cutoffs _lowerCAmelCase : Optional[int] = div_val _lowerCAmelCase : int = self.cutoffs[0] _lowerCAmelCase : Any = len(self.cutoffs ) - 1 _lowerCAmelCase : Union[str, Any] = self.shortlist_size + self.n_clusters _lowerCAmelCase : str = keep_order _lowerCAmelCase : int = [] _lowerCAmelCase : Union[str, Any] = [] def __A ( self , a__ ): if self.n_clusters > 0: _lowerCAmelCase : Tuple = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=a__ , name="""cluster_weight""" ) _lowerCAmelCase : Optional[Any] = self.add_weight( shape=(self.n_clusters,) , initializer="""zeros""" , trainable=a__ , name="""cluster_bias""" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: _lowerCAmelCase : List[str] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=a__ , name=F"out_projs_._{i}" , ) self.out_projs.append(a__ ) else: self.out_projs.append(a__ ) _lowerCAmelCase : Optional[Any] = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=a__ , name=F"out_layers_._{i}_._weight" , ) _lowerCAmelCase : List[str] = self.add_weight( shape=(self.vocab_size,) , initializer="""zeros""" , trainable=a__ , name=F"out_layers_._{i}_._bias" , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): _lowerCAmelCase : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] _lowerCAmelCase : Optional[Any] = self.d_embed // (self.div_val**i) _lowerCAmelCase : int = self.add_weight( shape=(d_emb_i, 
self.d_proj) , initializer="""zeros""" , trainable=a__ , name=F"out_projs_._{i}" ) self.out_projs.append(a__ ) _lowerCAmelCase : int = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=a__ , name=F"out_layers_._{i}_._weight" , ) _lowerCAmelCase : Any = self.add_weight( shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=a__ , name=F"out_layers_._{i}_._bias" , ) self.out_layers.append((weight, bias) ) super().build(a__ ) @staticmethod def __A ( a__ , a__ , a__ , a__=None ): _lowerCAmelCase : Union[str, Any] = x if proj is not None: _lowerCAmelCase : List[str] = tf.einsum("""ibd,ed->ibe""" , a__ , a__ ) return tf.einsum("""ibd,nd->ibn""" , a__ , a__ ) + b @staticmethod def __A ( a__ , a__ ): _lowerCAmelCase : Union[str, Any] = shape_list(a__ ) _lowerCAmelCase : Tuple = tf.range(lp_size[0] , dtype=target.dtype ) _lowerCAmelCase : Dict = tf.stack([r, target] , 1 ) return tf.gather_nd(a__ , a__ ) def __A ( self , a__ , a__ , a__=True , a__=False ): _lowerCAmelCase : Optional[Any] = 0 if self.n_clusters == 0: _lowerCAmelCase : Any = self._logit(a__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: _lowerCAmelCase : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=a__ , logits=a__ ) _lowerCAmelCase : Optional[Any] = tf.nn.log_softmax(a__ , axis=-1 ) else: _lowerCAmelCase : Optional[int] = shape_list(a__ ) _lowerCAmelCase : int = [] _lowerCAmelCase : List[Any] = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): _lowerCAmelCase : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: _lowerCAmelCase : str = (target >= l_idx) & (target < r_idx) _lowerCAmelCase : Dict = tf.where(a__ ) _lowerCAmelCase : List[str] = tf.boolean_mask(a__ , a__ ) - l_idx if self.div_val == 1: _lowerCAmelCase : Any = self.out_layers[0][0][l_idx:r_idx] _lowerCAmelCase : Dict = self.out_layers[0][1][l_idx:r_idx] else: _lowerCAmelCase : Union[str, 
Any] = self.out_layers[i][0] _lowerCAmelCase : int = self.out_layers[i][1] if i == 0: _lowerCAmelCase : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 ) _lowerCAmelCase : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 ) _lowerCAmelCase : Optional[int] = self._logit(a__ , a__ , a__ , self.out_projs[0] ) _lowerCAmelCase : Any = tf.nn.log_softmax(a__ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: _lowerCAmelCase : Optional[Any] = tf.boolean_mask(a__ , a__ ) _lowerCAmelCase : Tuple = self._gather_logprob(a__ , a__ ) else: _lowerCAmelCase : Optional[int] = self._logit(a__ , a__ , a__ , self.out_projs[i] ) _lowerCAmelCase : Union[str, Any] = tf.nn.log_softmax(a__ ) _lowerCAmelCase : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster _lowerCAmelCase : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(a__ ) if target is not None: _lowerCAmelCase : Any = tf.boolean_mask(a__ , a__ ) _lowerCAmelCase : Optional[Any] = tf.boolean_mask(a__ , a__ ) _lowerCAmelCase : str = self._gather_logprob(a__ , a__ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(a__ , -cur_logprob , shape_list(a__ ) ) _lowerCAmelCase : str = tf.concat(a__ , axis=-1 ) if target is not None: if return_mean: _lowerCAmelCase : int = tf.reduce_mean(a__ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(a__ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(a__ , name=self.name , aggregation="""mean""" if return_mean else """""" ) return out
213
import warnings

from ..trainer import Trainer
from ..utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)


class __lowerCAmelCase(_a):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like the parent trainer class.
    """

    def __init__(self, args=None, **kwargs) -> None:
        # Bug fixes relative to the original:
        #  * `__init__(self, __magic_name__=None, **__magic_name__)` declared the
        #    same parameter name twice, which is a SyntaxError in Python.
        #  * the second positional argument to `warnings.warn` is the warning
        #    *category*; the original passed the `args` parameter there instead
        #    of `FutureWarning`.
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
60
0
import re

from filelock import FileLock


try:
    import nltk

    # Bug fix: the flag was assigned to `a_` but read as `NLTK_AVAILABLE`
    # below, raising NameError at import time.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the one-time tokenizer download across concurrent processes.
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)


def a__(_UpperCamelCase):
    """Return the text with one sentence per line.

    Splits the input into sentences with nltk's punkt tokenizer and joins
    them with newlines.  Raises AssertionError if nltk is not installed.
    """
    # Bug fix: `re.sub` returns the cleaned string — the original discarded
    # the result, leaving the pegasus "<n>" newline marker in the text.
    _UpperCamelCase = re.sub('''<n>''', '''''', _UpperCamelCase)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_UpperCamelCase))
175
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" snake_case_ : str = '''mock-s3-bucket''' snake_case_ : str = f'''s3://{mock_bucket}''' snake_case_ : Any = extract_path_from_uri(_UpperCamelCase ) assert dataset_path.startswith('''s3://''' ) is False snake_case_ : Optional[Any] = '''./local/path''' snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase ) assert dataset_path == new_dataset_path def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase ) assert is_remote is True snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' ) snake_case_ : int = is_remote_filesystem(_UpperCamelCase ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol] if input_path is None: snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif 
compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(_UpperCamelCase ) snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase ) assert isinstance(_UpperCamelCase , _UpperCamelCase ) snake_case_ : int = os.path.basename(_UpperCamelCase ) snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} snake_case_ : Any = compressed_file_paths[protocol] snake_case_ : Any = '''dataset.jsonl''' snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}''' snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase ) assert fs.isfile(_UpperCamelCase ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase ) snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(_UpperCamelCase ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def lowerCamelCase_ ( ) -> Any: """simple docstring""" snake_case_ : Tuple = '''bz2''' # Import module import 
datasets.filesystems # Overwrite protocol and reload register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase ) with pytest.warns(_UpperCamelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(_UpperCamelCase ) == 1 assert ( str(warning_info[0].message ) == f'''A filesystem protocol was already set for {protocol} and will be overwritten.''' )
60
0
'''simple docstring'''


def euclidean_gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (iterative Euclid).

    Bug fix: the original declared both parameters with the same name
    (a SyntaxError) and collapsed the simultaneous swap ``a, b = b, a % b``
    into a single-target tuple assignment, so the loop never terminated
    meaningfully.
    """
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (recursive Euclid)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """Demonstrate both implementations on a few sample pairs.

    The function names restored here are the ones the original body already
    called (`euclidean_gcd`, `euclidean_gcd_recursive`, `main`), so callers
    inside this file now resolve.
    """
    print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}''')
    print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}''')
    print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}''')
    print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}''')
    print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}''')
    print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}''')
    print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}''')
    print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}''')
    print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}''')
    print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}''')


if __name__ == "__main__":
    main()
245
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Bug fix: the module-level logger was bound only to `lowerCAmelCase_` while
# the methods below referenced `logger` (NameError).  Both names are kept so
# any external reader of `lowerCAmelCase_` still works.
logger = logging.get_logger(__name__)
lowerCAmelCase_ = logger


class __lowerCAmelCase(_a):
    """Composite configuration wrapping an encoder config and a decoder config.

    Must be initialized with ``encoder`` and ``decoder`` keyword dicts; each
    dict's ``model_type`` entry selects the concrete sub-config class via
    ``AutoConfig.for_model``.
    """

    # Bug fix: the original bound *both* values to the same class attribute
    # (`lowerCamelCase_`), so one silently overwrote the other.
    model_type = '''encoder-decoder'''
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        # Bug fix: the original assigned these to throwaway locals, so
        # `self.encoder` / `self.decoder` were never set even though
        # `to_dict` reads them.
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> "PretrainedConfig":
        """Build a composite config from two instantiated sub-configs.

        Bug fix: the original classmethod declared duplicate parameter names
        (a SyntaxError) and set the decoder flags on throwaway locals instead
        of on `decoder_config`.
        """
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
60
0
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = ["model.decoder.embed_positions.weights"] def lowercase (_snake_case ) -> int: '''simple docstring''' if "emb" in name: __UpperCamelCase = name.replace("emb" ,"model.decoder.embed_tokens" ) if "transformer" in name: __UpperCamelCase = name.replace("transformer" ,"model.decoder" ) if "cross_attention" in name: __UpperCamelCase = name.replace("cross_attention" ,"encoder_attn" ) if "linear1" in name: __UpperCamelCase = name.replace("linear1" ,"fc1" ) if "linear2" in name: __UpperCamelCase = name.replace("linear2" ,"fc2" ) if "norm1" in name: __UpperCamelCase = name.replace("norm1" ,"self_attn_layer_norm" ) if "norm_cross" in name: __UpperCamelCase = name.replace("norm_cross" ,"encoder_attn_layer_norm" ) if "norm2" in name: __UpperCamelCase = name.replace("norm2" ,"final_layer_norm" ) if "out_norm" in name: __UpperCamelCase = name.replace("out_norm" ,"model.decoder.layer_norm" ) if "linears" in name: __UpperCamelCase = name.replace("linears" ,"lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: __UpperCamelCase = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" ) return name def lowercase (_snake_case ,_snake_case ) -> Tuple[Dict, Dict]: '''simple docstring''' __UpperCamelCase = list(state_dict.keys() ) __UpperCamelCase = {} for key in keys: __UpperCamelCase = state_dict.pop(_UpperCamelCase ) __UpperCamelCase = rename_keys(_UpperCamelCase ) if "in_proj_weight" in key: # split fused qkv proj 
__UpperCamelCase = val[:hidden_size, :] __UpperCamelCase = val[hidden_size : 2 * hidden_size, :] __UpperCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __UpperCamelCase = val else: __UpperCamelCase = val return state_dict, enc_dec_proj_state_dict def lowercase (_snake_case ) -> MusicgenDecoderConfig: '''simple docstring''' if checkpoint == "small": # default config values __UpperCamelCase = 1024 __UpperCamelCase = 24 __UpperCamelCase = 16 elif checkpoint == "medium": __UpperCamelCase = 1536 __UpperCamelCase = 48 __UpperCamelCase = 24 elif checkpoint == "large": __UpperCamelCase = 2048 __UpperCamelCase = 48 __UpperCamelCase = 32 else: raise ValueError(f"""Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.""" ) __UpperCamelCase = MusicgenDecoderConfig( hidden_size=_UpperCamelCase ,ffn_dim=hidden_size * 4 ,num_hidden_layers=_UpperCamelCase ,num_attention_heads=_UpperCamelCase ,) return config @torch.no_grad() def lowercase (_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case="cpu" ) -> int: '''simple docstring''' __UpperCamelCase = MusicGen.get_pretrained(_UpperCamelCase ,device=_UpperCamelCase ) __UpperCamelCase = decoder_config_from_checkpoint(_UpperCamelCase ) __UpperCamelCase = fairseq_model.lm.state_dict() __UpperCamelCase = rename_state_dict( _UpperCamelCase ,hidden_size=decoder_config.hidden_size ) __UpperCamelCase = TaEncoderModel.from_pretrained("t5-base" ) __UpperCamelCase = EncodecModel.from_pretrained("facebook/encodec_32khz" ) __UpperCamelCase = MusicgenForCausalLM(_UpperCamelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __UpperCamelCase = decoder.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: raise ValueError(f"""Missing key(s) in state_dict: 
{missing_keys}""" ) if len(_UpperCamelCase ) > 0: raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model __UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=_UpperCamelCase ,audio_encoder=_UpperCamelCase ,decoder=_UpperCamelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_UpperCamelCase ) # check we can do a forward pass __UpperCamelCase = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 ) __UpperCamelCase = input_ids.reshape(2 * 4 ,-1 ) with torch.no_grad(): __UpperCamelCase = model(input_ids=_UpperCamelCase ,decoder_input_ids=_UpperCamelCase ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor __UpperCamelCase = AutoTokenizer.from_pretrained("t5-base" ) __UpperCamelCase = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" ) __UpperCamelCase = MusicgenProcessor(feature_extractor=_UpperCamelCase ,tokenizer=_UpperCamelCase ) # set the appropriate bos/pad token ids __UpperCamelCase = 2048 __UpperCamelCase = 2048 # set other default generation config params __UpperCamelCase = int(30 * audio_encoder.config.frame_rate ) __UpperCamelCase = True __UpperCamelCase = 3.0 if pytorch_dump_folder is not None: Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) if repo_id: logger.info(f"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(_UpperCamelCase ) processor.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you\'d like to convert. 
Can be one of: `[\'small\', \'medium\', \'large\']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) _A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
505
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[int] = question_encoder snake_case_ : Optional[int] = generator snake_case_ : Optional[Any] = self.question_encoder def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' ) snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any: '''simple docstring''' from ..auto.tokenization_auto import AutoTokenizer snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ ) if config is None: snake_case_ : int = RagConfig.from_pretrained(__magic_name__ ) snake_case_ : Dict = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) snake_case_ : Dict = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple: '''simple docstring''' return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' return self.generator.batch_decode(*__magic_name__ , **__magic_name__ 
) def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int: '''simple docstring''' return self.generator.decode(*__magic_name__ , **__magic_name__ ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Any = self.question_encoder def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.generator def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding: '''simple docstring''' warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __magic_name__ , ) if max_length is None: snake_case_ : Dict = self.current_tokenizer.model_max_length snake_case_ : List[str] = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: snake_case_ : Optional[int] = self.current_tokenizer.model_max_length snake_case_ : Union[str, Any] = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) snake_case_ : str = labels['''input_ids'''] return model_inputs
60
0
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class lowercase__ ( unittest.TestCase ): def __init__( self , __UpperCAmelCase )-> int: '''simple docstring''' lowerCAmelCase__ = parent def UpperCAmelCase ( self )-> Dict: '''simple docstring''' return {} def _a ( ) -> List[Any]: """simple docstring""" lowerCAmelCase__ = '''<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>''' lowerCAmelCase__ = ''' <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> ''' return [html_string_a, html_string_a] @require_bsa class lowercase__ ( _a, unittest.TestCase ): a_ =MarkupLMFeatureExtractor if is_bsa_available() else None def UpperCAmelCase ( self )-> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = MarkupLMFeatureExtractionTester(self ) @property def UpperCAmelCase ( self )-> int: '''simple docstring''' return self.feature_extract_tester.prepare_feat_extract_dict() def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' lowerCAmelCase__ = self.feature_extraction_class() # Test not batched input lowerCAmelCase__ = get_html_strings()[0] lowerCAmelCase__ = feature_extractor(__UpperCAmelCase ) # fmt: off lowerCAmelCase__ = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. 
For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] lowerCAmelCase__ = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , __UpperCAmelCase ) self.assertEqual(encoding.xpaths , __UpperCAmelCase ) # Test batched lowerCAmelCase__ = get_html_strings() lowerCAmelCase__ = feature_extractor(__UpperCAmelCase ) # fmt: off lowerCAmelCase__ = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] lowerCAmelCase__ = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , __UpperCAmelCase ) self.assertEqual(encoding.xpaths , __UpperCAmelCase )
339
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]: '''simple docstring''' snake_case_ : List[str] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : List[Any] = image_size snake_case_ : Optional[int] = patch_size snake_case_ : Optional[Any] = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : List[Any] = use_labels snake_case_ : Optional[int] = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : Any = hidden_act snake_case_ : List[str] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = type_sequence_label_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) 
snake_case_ : Any = (image_size // patch_size) ** 2 snake_case_ : int = num_patches + 1 def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : List[Any] = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : int = self.get_config() return config, pixel_values, labels def lowerCamelCase (self ) -> Tuple: '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' snake_case_ : int = ViTMSNModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : List[str] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' snake_case_ : int = self.type_sequence_label_size snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ ) print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' ) print('''Labels: {labels}''' ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ : Optional[int] = 1 snake_case_ : 
List[str] = ViTMSNForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Any = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Any = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _a, _a, unittest.TestCase ): lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () lowerCamelCase_ : Optional[int] = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ : int = False lowerCamelCase_ : Optional[int] = False lowerCamelCase_ : int = False lowerCamelCase_ : Optional[int] = False def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : List[Any] = ViTMSNModelTester(self ) snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMSN does not use inputs_embeds''' ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Any = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or 
isinstance(__magic_name__ , nn.Linear ) ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(__magic_name__ ) snake_case_ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[int] = [*signature.parameters.keys()] snake_case_ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def lowerCamelCase (self ) -> Any: '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase_ ( ) -> Optional[Any]: """simple docstring""" snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None @slow def lowerCamelCase (self ) -> Any: '''simple docstring''' torch.manual_seed(2 ) snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ ) snake_case_ : str = self.default_image_processor snake_case_ : str = prepare_img() snake_case_ : int = 
image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(**__magic_name__ ) # verify the logits snake_case_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
60
0
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) _lowerCamelCase : Tuple = pytest.mark.integration @pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] ) def A__ ( __A : Optional[int] , __A : Any ) ->List[str]: inspect_dataset(_UpperCamelCase , _UpperCamelCase ) __A =path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''' , ['''accuracy'''] ) def A__ ( __A : Dict , __A : int ) ->Tuple: inspect_metric(_UpperCamelCase , _UpperCamelCase ) __A =path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.parametrize( '''path, config_name, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def A__ ( __A : Union[str, Any] , __A : Union[str, Any] , __A : str ) ->Tuple: __A =get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def A__ ( __A : Dict , __A : Dict , __A : Optional[Any] ) ->Any: with pytest.raises(_UpperCamelCase ): get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase ) @pytest.mark.parametrize( '''path, expected''' , [ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), 
('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ] , ) def A__ ( __A : Union[str, Any] , __A : Optional[Any] ) ->Optional[Any]: __A =get_dataset_config_names(_UpperCamelCase ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''' , [ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ] , ) def A__ ( __A : Optional[int] , __A : int , __A : int ) ->str: __A =get_dataset_infos(_UpperCamelCase ) assert list(infos.keys() ) == expected_configs __A =expected_configs[0] assert expected_config in infos __A =infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def A__ ( __A : Optional[int] , __A : List[str] , __A : Tuple ) ->int: __A =get_dataset_infos(_UpperCamelCase ) assert expected_config in infos __A =infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def A__ ( __A : str , __A : Any , __A : List[Any] ) ->Any: with pytest.raises(_UpperCamelCase ): get_dataset_split_names(_UpperCamelCase , config_name=_UpperCamelCase )
184
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# NOTE(review): both module-level constants were mangled to the same name;
# the second assignment (the archive map) shadows the logger.  Kept as-is to
# avoid changing names other mangled code may reference.
lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class __lowerCAmelCase(_a):
    """Configuration for an EfficientNet model.

    NOTE(review): the original signature had every parameter mangled to the
    same name (`__magic_name__`) — a duplicate-argument SyntaxError — and
    every attribute assigned to the throwaway local `snake_case_`.  The
    parameter and attribute names below are reconstructed from the names the
    original body reads on its right-hand sides; confirm against the upstream
    EfficientNetConfig before relying on them.
    """

    # model_type string used by the Auto* machinery.
    lowerCamelCase_: str = "efficientnet"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        dropout_rate=0.5,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        # List defaults mirror the original upstream signature; they are only
        # read, never mutated, so the shared-mutable-default pitfall does not
        # bite here.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands into 4 hidden layers (original computed
        # sum(num_block_repeats) * 4).
        self.num_hidden_layers = sum(num_block_repeats) * 4


class __lowerCAmelCase(_a):  # noqa: F811 — name collision inherited from the mangled source
    """ONNX export configuration for EfficientNet.

    NOTE(review): as in the mangled original, this class reuses the name of
    the config class above (shadowing it), and both properties below share
    the name `lowerCamelCase` (the second shadows the first).  Preserved
    as-is; renaming would change the visible interface.
    """

    # Minimum ONNX opset / torch version required for export.
    lowerCamelCase_ = version.parse("1.11")

    @property
    def lowerCamelCase(self) -> Mapping[str, Mapping[int, str]]:
        """Symbolic axis names for each model input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def lowerCamelCase(self) -> float:  # noqa: F811 — duplicate name kept from original
        """Absolute tolerance used when validating exported outputs."""
        return 1e-5
60
0
"""simple docstring""" from __future__ import annotations def lowercase__(A , A , A , A , A , ) ->None: """simple docstring""" lowercase__ : Dict= len(_UpperCamelCase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_UpperCamelCase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _UpperCamelCase , _UpperCamelCase , ) def lowercase__(A ) ->None: """simple docstring""" lowercase__ : list[list[str]]= [] depth_first_search([] , [] , [] , _UpperCamelCase , _UpperCamelCase ) # Print all the boards for board in boards: for column in board: print(_UpperCamelCase ) print("" ) print(len(_UpperCamelCase ) , "solutions were found." ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
218
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowerCAmelCase_ = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int) lowerCAmelCase_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowerCAmelCase_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowerCAmelCase_ = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase_ = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Any , __A : Any , __A : Dict=1_3 , __A : List[Any]=1_0 , __A : List[Any]=3 , __A : List[str]=2 , __A : Union[str, Any]=2 , __A : Union[str, Any]=True , __A : Optional[int]=True , __A : List[Any]=3_2 , __A : Optional[Any]=5 , __A : Dict=4 , __A : int=3_7 , __A : str="gelu" , __A : str=0.1 , __A : List[Any]=0.1 , __A : Any=1_0 , __A : str=0.0_2 , __A : Union[str, Any]="divided_space_time" , __A : Any=None , ): snake_case__ : Tuple = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Union[str, Any] = image_size snake_case__ : Tuple = num_channels snake_case__ : int = patch_size snake_case__ : str = num_frames snake_case__ : str = is_training snake_case__ : Dict = use_labels snake_case__ : Optional[int] = hidden_size snake_case__ : Dict = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : Union[str, Any] = intermediate_size snake_case__ : int = hidden_act snake_case__ : int = hidden_dropout_prob snake_case__ : Any = attention_probs_dropout_prob 
snake_case__ : Tuple = attention_type snake_case__ : Any = initializer_range snake_case__ : Optional[int] = scope snake_case__ : List[str] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token snake_case__ : Any = (image_size // patch_size) ** 2 snake_case__ : List[str] = (num_frames) * self.num_patches_per_frame + 1 def _lowercase ( self : Union[str, Any] ): snake_case__ : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) snake_case__ : List[str] = None if self.use_labels: snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : List[Any] = self.get_config() return config, pixel_values, labels def _lowercase ( self : List[str] ): snake_case__ : int = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) snake_case__ : Tuple = self.num_labels return config def _lowercase ( self : Optional[int] , __A : Union[str, Any] , __A : int , __A : Tuple ): snake_case__ : Dict = TimesformerModel(config=__A ) model.to(__A ) model.eval() snake_case__ : List[str] = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Any , __A : List[str] , __A : Union[str, Any] , __A : Union[str, Any] ): snake_case__ : Optional[int] = TimesformerForVideoClassification(__A ) model.to(__A ) model.eval() snake_case__ : Union[str, Any] = model(__A ) # verify the logits shape 
snake_case__ : Tuple = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __A ) def _lowercase ( self : Dict ): snake_case__ : str = self.prepare_config_and_inputs() snake_case__ : Union[str, Any] = config_and_inputs snake_case__ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _a , _a , unittest.TestCase ): """simple docstring""" a_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () a_ = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False def _lowercase ( self : Tuple ): snake_case__ : Union[str, Any] = TimesformerModelTester(self ) snake_case__ : Tuple = ConfigTester( self , config_class=__A , has_text_modality=__A , hidden_size=3_7 ) def _lowercase ( self : str , __A : Any , __A : List[Any] , __A : str=False ): snake_case__ : Tuple = copy.deepcopy(__A ) if return_labels: if model_class in get_values(__A ): snake_case__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _lowercase ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def _lowercase ( self : Optional[Any] ): pass def _lowercase ( self : str ): snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Tuple = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def _lowercase ( self : str ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Optional[int] = model_class(__A 
) snake_case__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : List[str] = [*signature.parameters.keys()] snake_case__ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) def _lowercase ( self : List[str] ): snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def _lowercase ( self : Dict ): snake_case__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__A ) @slow def _lowercase ( self : List[Any] ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : Optional[Any] = TimesformerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def _lowercase ( self : List[Any] ): if not self.has_attentions: pass else: snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Union[str, Any] = True for model_class in self.all_model_classes: snake_case__ : List[Any] = self.model_tester.seq_length snake_case__ : List[Any] = self.model_tester.num_frames snake_case__ : List[str] = True snake_case__ : Union[str, Any] = False snake_case__ : List[str] = True snake_case__ : List[Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): snake_case__ : Optional[Any] = model(**self._prepare_for_class(__A , __A ) ) snake_case__ : Optional[Any] = outputs.attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case__ : Dict = True snake_case__ : Optional[int] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): snake_case__ : Tuple = model(**self._prepare_for_class(__A , __A ) ) snake_case__ : List[Any] = outputs.attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x 
num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) snake_case__ : Dict = len(__A ) # Check attention is always last and order is fine snake_case__ : Dict = True snake_case__ : Any = True snake_case__ : int = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): snake_case__ : Dict = model(**self._prepare_for_class(__A , __A ) ) self.assertEqual(out_len + 1 , len(__A ) ) snake_case__ : Optional[Any] = outputs.attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def _lowercase ( self : int ): def check_hidden_states_output(__A : str , __A : Optional[int] , __A : int ): snake_case__ : List[Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): snake_case__ : List[Any] = model(**self._prepare_for_class(__A , __A ) ) snake_case__ : str = outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__A ) , __A ) snake_case__ : Optional[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Optional[Any] = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[str] = True check_hidden_states_output(__A , __A , __A ) def SCREAMING_SNAKE_CASE ( ): snake_case__ : 
List[Any] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) snake_case__ : Dict = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Optional[Any] ): return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _lowercase ( self : List[Any] ): snake_case__ : Any = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( __A ) snake_case__ : List[str] = self.default_image_processor snake_case__ : List[str] = prepare_video() snake_case__ : int = image_processor(video[:8] , return_tensors="pt" ).to(__A ) # forward pass with torch.no_grad(): snake_case__ : Union[str, Any] = model(**__A ) # verify the logits snake_case__ : Any = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , __A ) snake_case__ : Any = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
297
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__ = "▁" , __magic_name__ = True , __magic_name__ = "<unk>" , __magic_name__ = "</s>" , __magic_name__ = "<pad>" , ) -> Dict: '''simple docstring''' snake_case_ : List[Any] = { '''pad''': {'''id''': 0, '''token''': pad_token}, '''eos''': {'''id''': 1, '''token''': eos_token}, '''unk''': {'''id''': 2, '''token''': unk_token}, } snake_case_ : List[str] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): snake_case_ : int = token_dict['''token'''] snake_case_ : Optional[int] = Tokenizer(Unigram() ) snake_case_ : int = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ), normalizers.Lowercase(), ] ) snake_case_ : Optional[int] = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ ), pre_tokenizers.Digits(individual_digits=__magic_name__ ), pre_tokenizers.Punctuation(), ] ) snake_case_ : Tuple = decoders.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ ) snake_case_ : Optional[Any] = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , ) snake_case_ : Optional[Any] = { '''model''': '''SentencePieceUnigram''', '''replacement''': replacement, '''add_prefix_space''': add_prefix_space, } super().__init__(__magic_name__ , __magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = 
trainers.UnigramTrainer( vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , ) if isinstance(__magic_name__ , __magic_name__ ): snake_case_ : Dict = [files] self._tokenizer.train(__magic_name__ , trainer=__magic_name__ ) self.add_unk_id() def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> int: '''simple docstring''' snake_case_ : Any = trainers.UnigramTrainer( vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , ) self._tokenizer.train_from_iterator(__magic_name__ , trainer=__magic_name__ ) self.add_unk_id() def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = json.loads(self._tokenizer.to_str() ) snake_case_ : Union[str, Any] = self.special_tokens['''unk''']['''id'''] snake_case_ : Tuple = Tokenizer.from_str(json.dumps(__magic_name__ ) )
60
0
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCAmelCase__ ( unittest.TestCase ): def A__ ( self ): _A : Union[str, Any] = 10 def A__ ( self ): _A : Optional[int] = [1, 2, 3, 4] _A : Optional[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(A__ ,self.block_size ,0 ) ,A__ ) def A__ ( self ): _A : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _A : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(A__ ,self.block_size ,0 ) ,A__ ) def A__ ( self ): _A : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _A : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(A__ ,self.block_size ,0 ) ,A__ ) def A__ ( self ): _A : Any = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' _A : Optional[int] = process_story(A__ ) self.assertEqual(A__ ,[] ) def A__ ( self ): _A : int = '''''' _A : List[str] = process_story(A__ ) self.assertEqual(A__ ,[] ) self.assertEqual(A__ ,[] ) def A__ ( self ): _A : Tuple = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) _A : List[Any] = process_story(A__ ) _A : Any = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(A__ ,A__ ) _A : int = ['''It was the best of times.'''] self.assertEqual(A__ ,A__ ) def A__ ( self ): _A : List[str] = torch.tensor([1, 2, 3, 4] ) _A : Any = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(A__ ,0 ).numpy() ,expected.numpy() ) def A__ ( self ): _A : Any = torch.tensor([1, 2, 3, 4, 23, 23, 
23] ) _A : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(A__ ,23 ).numpy() ,expected.numpy() ) def A__ ( self ): _A : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _A : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(A__ ,1 ).numpy() ,expected.numpy() ) def A__ ( self ): _A : List[str] = 101 _A : int = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _A : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _A : List[str] = compute_token_type_ids(A__ ,A__ ) np.testing.assert_array_equal(A__ ,A__ )
206
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : List[Any] = [False] * len(_UpperCamelCase ) snake_case_ : int = [-1] * len(_UpperCamelCase ) def dfs(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Dict = True snake_case_ : Dict = c for u in graph[v]: if not visited[u]: dfs(_UpperCamelCase , 1 - c ) for i in range(len(_UpperCamelCase ) ): if not visited[i]: dfs(_UpperCamelCase , 0 ) for i in range(len(_UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
60
0
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class __magic_name__ ( _a ): UpperCamelCase : Optional[Any] = ComputeEnvironment.AMAZON_SAGEMAKER UpperCamelCase : int = True UpperCamelCase : Tuple = '''ml.p3.2xlarge''' UpperCamelCase : Any = '''accelerate_sagemaker_execution_role''' UpperCamelCase : Optional[int] = '''hf-sm''' UpperCamelCase : Optional[Any] = '''us-east-1''' UpperCamelCase : Optional[int] = 1 UpperCamelCase : List[str] = '''accelerate-sagemaker-1''' UpperCamelCase : List[Any] = '''1.6''' UpperCamelCase : Optional[int] = '''4.4''' UpperCamelCase : Dict = '''train.py''' UpperCamelCase : str = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''False''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] UpperCamelCase : List[Any] = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''--do_test''', '''False''', '''--do_predict''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] class __magic_name__ ( unittest.TestCase ): def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args['model_name_or_path'] , __magic_name__ ) assert isinstance(converted_args['do_train'] , __magic_name__ ) assert isinstance(converted_args['epochs'] , __magic_name__ ) assert isinstance(converted_args['learning_rate'] , __magic_name__ ) assert isinstance(converted_args['max_steps'] , __magic_name__ ) with pytest.raises(__magic_name__ ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
589
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int: '''simple docstring''' snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20} snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} snake_case_ : str = parent snake_case_ : Optional[int] = batch_size snake_case_ : Dict = num_channels snake_case_ : List[Any] = image_size snake_case_ : Union[str, Any] = min_resolution snake_case_ : Tuple = max_resolution snake_case_ : str = do_resize snake_case_ : Tuple = size snake_case_ : int = do_center_crop snake_case_ : Tuple = crop_size snake_case_ : int = do_normalize snake_case_ : Optional[Any] = image_mean snake_case_ : List[str] = image_std snake_case_ : str = do_reduce_labels def lowerCamelCase (self ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , 
split='''test''' ) snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] ) snake_case_ : str = Image.open(dataset[1]['''file'''] ) return image, map def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] ) snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] ) snake_case_ : List[str] = Image.open(ds[2]['''file'''] ) snake_case_ : str = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = BeitImageProcessingTester(self ) @property def lowerCamelCase (self ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''size''' ) ) self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) ) self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) ) self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) snake_case_ : Union[str, Any] = 
self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' pass def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) snake_case_ : Union[str, Any] = [] for image in image_inputs: 
self.assertIsInstance(__magic_name__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test not batched input (PIL images) snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs() snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( 
encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched input (PIL images) snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs() snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs() snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 150 ) snake_case_ : List[Any] = True snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 )
60
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class __magic_name__ ( _a ): _lowerCAmelCase = '''falcon''' _lowerCAmelCase = ['''past_key_values'''] def __init__( self : List[Any] , lowerCamelCase__ : Tuple=6_5_0_2_4 , lowerCamelCase__ : List[str]=4_5_4_4 , lowerCamelCase__ : Optional[Any]=3_2 , lowerCamelCase__ : str=7_1 , lowerCamelCase__ : List[str]=1E-5 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=0.0 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : int=None , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : int=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Optional[int]=1_1 , lowerCamelCase__ : int=1_1 , **lowerCamelCase__ : Dict , ): lowerCAmelCase : Optional[Any] = vocab_size # Backward compatibility with n_embed kwarg lowerCAmelCase : Dict = kwargs.pop('''n_embed''' , lowerCamelCase__ ) lowerCAmelCase : Optional[int] = hidden_size if n_embed is None else n_embed lowerCAmelCase : List[str] = num_hidden_layers lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Optional[Any] = layer_norm_epsilon lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : List[str] = use_cache lowerCAmelCase : Optional[int] = hidden_dropout lowerCAmelCase : List[Any] = attention_dropout lowerCAmelCase : Any = bos_token_id lowerCAmelCase : Dict = eos_token_id lowerCAmelCase : Optional[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads lowerCAmelCase : Optional[Any] = alibi lowerCAmelCase : Optional[int] = new_decoder_architecture lowerCAmelCase : str = multi_query # Ignored when 
new_decoder_architecture is True lowerCAmelCase : List[str] = parallel_attn lowerCAmelCase : List[str] = bias super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ ) @property def _A ( self : str ): return self.hidden_size // self.num_attention_heads @property def _A ( self : int ): return not self.alibi
348
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. 
Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. ])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any: '''simple docstring''' snake_case_ : List[Any] = mean_squared_error( 
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ ) return {"mse": mse}
60
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: _a : Dict = None _a : Any = logging.get_logger(__name__) _a : Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} _a : Any = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json' ), }, } _a : Tuple = { 'moussaKam/mbarthez': 1_024, 'moussaKam/barthez': 1_024, 'moussaKam/barthez-orangesum-title': 1_024, } _a : List[Any] = '▁' class __A ( _a ): _UpperCamelCase : Tuple = VOCAB_FILES_NAMES _UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : Union[str, Any] = BarthezTokenizer def __init__( self , a__=None , a__=None , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , **a__ , ): _lowerCAmelCase : Any = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token super().__init__( a__ , tokenizer_file=a__ , bos_token=a__ , 
eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , **a__ , ) _lowerCAmelCase : Tuple = vocab_file _lowerCAmelCase : str = False if not self.vocab_file else True def __A ( self , a__ , a__ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase : Tuple = [self.cls_token_id] _lowerCAmelCase : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __A ( self , a__ , a__ = None ): _lowerCAmelCase : List[Any] = [self.sep_token_id] _lowerCAmelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self , a__ , a__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(a__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _lowerCAmelCase : str = os.path.join( a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ): copyfile(self.vocab_file , a__ ) return (out_vocab_file,)
213
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class __lowerCAmelCase : lowerCamelCase_ : Any = None def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __magic_name__ ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(__magic_name__ ) snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Tuple = self.feature_extraction_class() self.assertIsNotNone(__magic_name__ )
60
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : List[Any] = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : int ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' 
which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. __lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Tuple ): if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # New Code # __lowerCamelCase = int(args.gradient_accumulation_steps ) __lowerCamelCase = int(args.local_sgd_steps ) # Initialize accelerator __lowerCamelCase = Accelerator( cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=_UpperCamelCase ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' ) # Sample hyper-parameters 
for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) set_seed(_UpperCamelCase ) __lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() with LocalSGD( accelerator=_UpperCamelCase ,model=_UpperCamelCase ,local_sgd_steps=_UpperCamelCase ,enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_UpperCamelCase ): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = output.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''The number of minibatches to be ran before gradients are accumulated.''' ,) parser.add_argument( '''--local_sgd_steps''' ,type=_UpperCamelCase ,default=8 ,help='''Number of local SGD steps or None to disable local SGD''' ) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
175
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase : lowerCamelCase_ : str lowerCamelCase_ : str = None @staticmethod def lowerCamelCase () -> Any: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' raise NotImplementedError def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' if not self.is_available(): raise RuntimeError( F'''You picked the {self.name} backend, but it is not installed. 
Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase (cls ) -> List[Any]: '''simple docstring''' return F'''`pip install {cls.pip_package or cls.name}`''' class __lowerCAmelCase ( _a ): lowerCamelCase_ : Optional[int] = '''optuna''' @staticmethod def lowerCamelCase () -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return default_hp_space_optuna(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Any = '''ray''' lowerCamelCase_ : List[str] = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase () -> List[Any]: '''simple docstring''' return is_ray_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return default_hp_space_ray(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''sigopt''' @staticmethod def lowerCamelCase () -> Optional[int]: '''simple docstring''' return is_sigopt_available() def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]: '''simple docstring''' return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' return default_hp_space_sigopt(__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''wandb''' @staticmethod def lowerCamelCase () -> Dict: '''simple docstring''' return is_wandb_available() 
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]: '''simple docstring''' return default_hp_space_wandb(__magic_name__ ) lowerCAmelCase_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_UpperCamelCase ) > 0: snake_case_ : Dict = available_backends[0].name if len(_UpperCamelCase ) > 1: logger.info( f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( '''No hyperparameter search backend available.\n''' + '''\n'''.join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
60
0
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
245
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list: """simple docstring""" snake_case_ : Tuple = len(_UpperCamelCase ) snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): snake_case_ : Any = y_points[i] for i in range(2 , _UpperCamelCase ): for j in range(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Optional[int] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
60
0
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowercase (_snake_case ) -> Optional[int]: '''simple docstring''' __UpperCamelCase = [False] * len(_UpperCamelCase ) __UpperCamelCase = [-1] * len(_UpperCamelCase ) def dfs(_snake_case ,_snake_case ): __UpperCamelCase = True __UpperCamelCase = c for u in graph[v]: if not visited[u]: dfs(_UpperCamelCase ,1 - c ) for i in range(len(_UpperCamelCase ) ): if not visited[i]: dfs(_UpperCamelCase ,0 ) for i in range(len(_UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
505
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
0
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def _a ( UpperCamelCase_ : str ) -> List[Any]: """simple docstring""" lowerCAmelCase__ = [] embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight", F"stage{idx}.patch_embed.proj.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias", F"stage{idx}.patch_embed.proj.bias", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight", F"stage{idx}.patch_embed.norm.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias", F"stage{idx}.patch_embed.norm.bias", ) ) return embed def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]: """simple docstring""" lowerCAmelCase__ = [] attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean", ) ) attention_weights.append( ( 
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight", 
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight", F"stage{idx}.blocks.{cnt}.attn.proj_q.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias", F"stage{idx}.blocks.{cnt}.attn.proj_q.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight", F"stage{idx}.blocks.{cnt}.attn.proj_k.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias", F"stage{idx}.blocks.{cnt}.attn.proj_k.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight", 
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias", F"stage{idx}.blocks.{cnt}.attn.proj_v.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight", F"stage{idx}.blocks.{cnt}.attn.proj.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias", F"stage{idx}.blocks.{cnt}.attn.proj.bias", ) ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") ) return attention_weights def _a ( UpperCamelCase_ : List[Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = [] token.append((F"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token") ) return token def _a ( ) -> Dict: """simple docstring""" lowerCAmelCase__ = [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", 
"head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ = '''imagenet-1k-id2label.json''' lowerCAmelCase__ = 1_000 lowerCAmelCase__ = '''huggingface/label-files''' lowerCAmelCase__ = num_labels lowerCAmelCase__ = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) ) , "r" ) ) lowerCAmelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} lowerCAmelCase__ = CvtConfig(num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowerCAmelCase__ = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowerCAmelCase__ = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowerCAmelCase__ = [2, 2, 20] lowerCAmelCase__ = [3, 12, 16] lowerCAmelCase__ = [192, 768, 1_024] lowerCAmelCase__ = CvtForImageClassification(_UpperCamelCase ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowerCAmelCase__ = image_size lowerCAmelCase__ = torch.load(_UpperCamelCase , map_location=torch.device("cpu" ) ) lowerCAmelCase__ = OrderedDict() lowerCAmelCase__ = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowerCAmelCase__ = list_of_state_dict + cls_token(_UpperCamelCase ) lowerCAmelCase__ = list_of_state_dict + embeddings(_UpperCamelCase ) for cnt in range(config.depth[idx] ): lowerCAmelCase__ = list_of_state_dict + attention(_UpperCamelCase , _UpperCamelCase ) lowerCAmelCase__ = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) ): 
lowerCAmelCase__ = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) image_processor.save_pretrained(_UpperCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
339
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" return getitem, k def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" return setitem, k, v def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple: """simple docstring""" return delitem, k def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str: """simple docstring""" try: return fun(_UpperCamelCase , *_UpperCamelCase ), None except Exception as e: return None, e lowerCAmelCase_ = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) lowerCAmelCase_ = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] lowerCAmelCase_ = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] lowerCAmelCase_ = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] lowerCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] lowerCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( '''operations''' , ( pytest.param(_add_items , id='''add items''' ), pytest.param(_overwrite_items , id='''overwrite items''' ), pytest.param(_delete_items , id='''delete items''' ), pytest.param(_access_absent_items , id='''access absent items''' ), pytest.param(_add_with_resize_up , id='''add with resize up''' ), pytest.param(_add_with_resize_down , id='''add with resize down''' ), ) , ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" snake_case_ : Any = HashMap(initial_block_size=4 ) snake_case_ : Union[str, Any] = {} for _, (fun, *args) in enumerate(_UpperCamelCase ): snake_case_ , snake_case_ : str = 
_run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) assert my_res == py_res assert str(_UpperCamelCase ) == str(_UpperCamelCase ) assert set(_UpperCamelCase ) == set(_UpperCamelCase ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) assert set(my.items() ) == set(py.items() ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" def is_public(_UpperCamelCase ) -> bool: return not name.startswith('''_''' ) snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )} snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )} assert dict_public_names > hash_public_names
60
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase : int = { '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''], '''tokenization_roc_bert''': ['''RoCBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoCBertForCausalLM''', '''RoCBertForMaskedLM''', '''RoCBertForMultipleChoice''', '''RoCBertForPreTraining''', '''RoCBertForQuestionAnswering''', '''RoCBertForSequenceClassification''', '''RoCBertForTokenClassification''', '''RoCBertLayer''', '''RoCBertModel''', '''RoCBertPreTrainedModel''', '''load_tf_weights_in_roc_bert''', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
184
from __future__ import annotations def lowerCamelCase_ ( _UpperCamelCase ) -> list: """simple docstring""" if len(_UpperCamelCase ) == 0: return [] snake_case_ , snake_case_ : Dict = min(_UpperCamelCase ), max(_UpperCamelCase ) snake_case_ : List[str] = int(max_value - min_value ) + 1 snake_case_ : list[list] = [[] for _ in range(_UpperCamelCase )] for i in my_list: buckets[int(i - min_value )].append(_UpperCamelCase ) return [v for bucket in buckets for v in sorted(_UpperCamelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
60
0
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowercase__(A ) ->Any: """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowercase__() ->Union[str, Any]: """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowercase__() ->Tuple: """simple docstring""" lowercase__ : str= '''mock-s3-bucket''' lowercase__ : str= f'''s3://{mock_bucket}''' lowercase__ : Any= extract_path_from_uri(_UpperCamelCase ) assert dataset_path.startswith("s3://" ) is False lowercase__ : Optional[Any]= '''./local/path''' lowercase__ : List[str]= extract_path_from_uri(_UpperCamelCase ) assert dataset_path == new_dataset_path def lowercase__(A ) ->str: """simple docstring""" lowercase__ : Union[str, Any]= is_remote_filesystem(_UpperCamelCase ) assert is_remote is True lowercase__ : Union[str, Any]= fsspec.filesystem("file" ) lowercase__ : int= is_remote_filesystem(_UpperCamelCase ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , _UpperCamelCase ) def lowercase__(A , A , A , A , A , A , A ) ->Tuple: """simple docstring""" lowercase__ : Optional[Any]= {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowercase__ : Optional[Any]= input_paths[compression_fs_class.protocol] if input_path is None: lowercase__ : List[Any]= f'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(_UpperCamelCase ) lowercase__ : Dict= 
fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase ) assert isinstance(_UpperCamelCase , _UpperCamelCase ) lowercase__ : int= os.path.basename(_UpperCamelCase ) lowercase__ : Any= expected_filename[: expected_filename.rindex("." )] assert fs.glob("*" ) == [expected_filename] with fs.open(_UpperCamelCase , "r" , encoding="utf-8" ) as f, open(_UpperCamelCase , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def lowercase__(A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : Union[str, Any]= {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowercase__ : Any= compressed_file_paths[protocol] lowercase__ : Any= '''dataset.jsonl''' lowercase__ : Dict= f'''{protocol}://{member_file_path}::{compressed_file_path}''' lowercase__ : Optional[Any]= fsspec.get_fs_token_paths(_UpperCamelCase ) assert fs.isfile(_UpperCamelCase ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def lowercase__(A , A , A , A ) ->Dict: """simple docstring""" lowercase__ : Optional[int]= hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase ) lowercase__ : List[str]= HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(_UpperCamelCase ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def lowercase__() ->Any: """simple docstring""" lowercase__ : Tuple= '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase ) with pytest.warns(_UpperCamelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(_UpperCamelCase ) == 1 assert ( str(warning_info[0].message ) == f'''A filesystem protocol was already set for 
{protocol} and will be overwritten.''' )
218
import tensorflow as tf from ...tf_utils import shape_list class __lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict: '''simple docstring''' super().__init__(**__magic_name__ ) snake_case_ : List[Any] = vocab_size snake_case_ : Dict = d_embed snake_case_ : Union[str, Any] = d_proj snake_case_ : str = cutoffs + [vocab_size] snake_case_ : int = [0] + self.cutoffs snake_case_ : Optional[int] = div_val snake_case_ : int = self.cutoffs[0] snake_case_ : Any = len(self.cutoffs ) - 1 snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters snake_case_ : str = keep_order snake_case_ : int = [] snake_case_ : Union[str, Any] = [] def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' if self.n_clusters > 0: snake_case_ : Tuple = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' ) snake_case_ : Optional[Any] = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: snake_case_ : List[str] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , ) self.out_projs.append(__magic_name__ ) else: self.out_projs.append(__magic_name__ ) snake_case_ : Optional[Any] = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , ) snake_case_ : List[str] = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): snake_case_ , 
snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i) snake_case_ : int = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' ) self.out_projs.append(__magic_name__ ) snake_case_ : int = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , ) snake_case_ : Any = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) super().build(__magic_name__ ) @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = x if proj is not None: snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ ) return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = shape_list(__magic_name__ ) snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype ) snake_case_ : Dict = tf.stack([r, target] , 1 ) return tf.gather_nd(__magic_name__ , __magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = 0 if self.n_clusters == 0: snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ ) snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 ) else: snake_case_ : Optional[int] = 
shape_list(__magic_name__ ) snake_case_ : int = [] snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: snake_case_ : str = (target >= l_idx) & (target < r_idx) snake_case_ : Dict = tf.where(__magic_name__ ) snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx if self.div_val == 1: snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx] snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx] else: snake_case_ : Union[str, Any] = self.out_layers[i][0] snake_case_ : int = self.out_layers[i][1] if i == 0: snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 ) snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 ) snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] ) snake_case_ : Any = tf.nn.log_softmax(__magic_name__ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ ) else: snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] ) snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ ) snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__magic_name__ ) if target is not None: snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__magic_name__ , 
-cur_logprob , shape_list(__magic_name__ ) ) snake_case_ : str = tf.concat(__magic_name__ , axis=-1 ) if target is not None: if return_mean: snake_case_ : int = tf.reduce_mean(__magic_name__ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__magic_name__ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' ) return out
60
0
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ): snake_case__ : str = [1] snake_case__ : Tuple = 0, 0, 0 snake_case__ : Optional[int] = ugly_nums[ia] * 2 snake_case__ : List[str] = ugly_nums[ia] * 3 snake_case__ : Union[str, Any] = ugly_nums[ia] * 5 for _ in range(1 , _UpperCamelCase ): snake_case__ : Dict = min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ugly_nums.append(_UpperCamelCase ) if next_num == next_a: ia += 1 snake_case__ : Optional[Any] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 snake_case__ : List[str] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 snake_case__ : Optional[Any] = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f"{ugly_numbers(200) = }")
297
import requests def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None: """simple docstring""" snake_case_ : Tuple = {'''Content-Type''': '''application/json'''} snake_case_ : Any = requests.post(_UpperCamelCase , json={'''text''': message_body} , headers=_UpperCamelCase ) if response.status_code != 200: snake_case_ : List[Any] = ( '''Request to slack returned an error ''' f'''{response.status_code}, the response is:\n{response.text}''' ) raise ValueError(_UpperCamelCase ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
60
0
_UpperCamelCase : Optional[int] =[ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def a__ (__lowercase :List[str] , __lowercase :Tuple , __lowercase :int , __lowercase :Optional[int] ) -> Optional[int]: _A : int = [False] * len(_UpperCamelCase ) _A : str = [s] _A : Tuple = True while queue: _A : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) _A : List[Any] = True _A : str = u return visited[t] def a__ (__lowercase :Any , __lowercase :Optional[int] , __lowercase :int ) -> Any: _A : int = [-1] * (len(_UpperCamelCase )) _A : List[Any] = 0 _A : List[Any] = [] _A : List[Any] = [i[:] for i in graph] # Record original cut, copy. while bfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _A : List[Any] = float('''Inf''' ) _A : Tuple = sink while s != source: # Find the minimum value in select path _A : Tuple = min(_UpperCamelCase , graph[parent[s]][s] ) _A : Tuple = parent[s] max_flow += path_flow _A : str = sink while v != source: _A : str = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _A : Dict = parent[v] for i in range(len(_UpperCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
206
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''], '''processing_speech_to_text''': ['''Speech2TextProcessor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''Speech2TextTokenizer'''] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''Speech2TextFeatureExtractor'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSpeech2TextForConditionalGeneration''', '''TFSpeech2TextModel''', '''TFSpeech2TextPreTrainedModel''', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Speech2TextForConditionalGeneration''', '''Speech2TextModel''', '''Speech2TextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
0
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def A__ ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_UpperCamelCase ): requests.request('GET', 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET', 'https://huggingface.co', timeout=1.0 ) @pytest.mark.integration def A__ ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET', 'https://huggingface.co' ) def A__ ( ): """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_UpperCamelCase ): http_head('https://huggingface.co' )
589
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = '''owlvit_text_model''' def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str: '''simple docstring''' super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) snake_case_ : int = vocab_size snake_case_ : str = hidden_size snake_case_ : List[Any] = intermediate_size snake_case_ : str = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[Any] = max_position_embeddings snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = layer_norm_eps snake_case_ : Dict = attention_dropout snake_case_ : Union[str, Any] = initializer_range snake_case_ : int = initializer_factor @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__magic_name__ ) snake_case_ , snake_case_ : str = 
cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": snake_case_ : str = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : int = '''owlvit_vision_model''' def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int: '''simple docstring''' super().__init__(**__magic_name__ ) snake_case_ : Optional[Any] = hidden_size snake_case_ : Union[str, Any] = intermediate_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : List[Any] = num_channels snake_case_ : Union[str, Any] = image_size snake_case_ : Dict = patch_size snake_case_ : List[Any] = hidden_act snake_case_ : Tuple = layer_norm_eps snake_case_ : Dict = attention_dropout snake_case_ : List[str] = initializer_range snake_case_ : List[Any] = initializer_factor @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__magic_name__ ) snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": snake_case_ : str = config_dict['''vision_config'''] if "model_type" 
in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class __lowerCAmelCase ( _a ): lowerCamelCase_ : int = '''owlvit''' lowerCamelCase_ : Optional[int] = True def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int: '''simple docstring''' super().__init__(**__magic_name__ ) if text_config is None: snake_case_ : Tuple = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: snake_case_ : str = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) snake_case_ : str = OwlViTTextConfig(**__magic_name__ ) snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ ) snake_case_ : Any = projection_dim snake_case_ : Union[str, Any] = logit_scale_init_value snake_case_ : str = return_dict snake_case_ : Any = 1.0 @classmethod def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__magic_name__ ) snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) @classmethod def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str: '''simple docstring''' snake_case_ : Optional[int] = {} snake_case_ : Union[str, Any] = text_config snake_case_ : Optional[Any] = vision_config return cls.from_dict(__magic_name__ , **__magic_name__ ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : List[Any] = self.text_config.to_dict() snake_case_ : List[Any] = self.vision_config.to_dict() snake_case_ : Tuple = self.__class__.model_type return output class __lowerCAmelCase ( _a ): @property def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def lowerCamelCase (self ) -> float: '''simple docstring''' return 1e-4 def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]: '''simple docstring''' snake_case_ : Dict = super().generate_dummy_inputs( processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ ) snake_case_ : List[str] = super().generate_dummy_inputs( processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ ) return {**text_input_dict, **image_input_dict} @property def lowerCamelCase (self ) -> 
int: '''simple docstring''' return 14
60
0
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __magic_name__ ( _a, _a, unittest.TestCase ): _lowerCAmelCase = IFInpaintingPipeline _lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def _A ( self : List[str] ): return self._get_dummy_components() def _A ( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any]=0 ): if str(lowerCamelCase__ ).startswith('''mps''' ): lowerCAmelCase : Union[str, Any] = torch.manual_seed(lowerCamelCase__ ) else: lowerCAmelCase : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) lowerCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) lowerCAmelCase : Dict = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _A ( self : Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _A ( self : List[str] ): 
self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def _A ( self : str ): super().test_save_load_floataa(expected_max_diff=1E-1 ) def _A ( self : List[Any] ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _A ( self : Dict ): self._test_save_load_local() def _A ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
348
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """Checks `accelerate launch` works with the bundled test script.

    Fix: all class attributes had been collapsed onto one mangled name while
    the methods still read ``self.base_cmd`` / ``cls.config_path`` etc., and
    the local ``cmd`` in ``test_no_config`` was never bound.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Move the user's default config out of the way so tests run clean.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """Checks the gcloud command `accelerate tpu-config --debug` would run."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    # Base output from the default config's startup commands.
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
60
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _a : int = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ['ViTFeatureExtractor'] _a : Any = ['ViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ 'VIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTForImageClassification', 'ViTForMaskedImageModeling', 'ViTModel', 'ViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Any = [ 'TFViTForImageClassification', 'TFViTModel', 'TFViTPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ 'FlaxViTForImageClassification', 'FlaxViTModel', 'FlaxViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, 
TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
213
import warnings from ..trainer import Trainer from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict: '''simple docstring''' warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , __magic_name__ , ) super().__init__(args=__magic_name__ , **__magic_name__ )
60
0
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    """CLI factory: build the command object from parsed arguments."""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    """`transformers-cli add-new-model`: scaffold a new model via cookiecutter.

    Fixes vs. the previous version: the cookiecutter flag, parser, factory,
    class name and instance attributes were all undefined mangled names, so
    neither registration nor ``run`` could execute.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        """Run the cookiecutter template and move generated files into the repo layout."""
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            # Strip "# Copied from transformers." markers from generated files.
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Insert `lines_to_copy` into `original_file` right below `line_to_copy_below`.
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            # Parse the to_replace data file and apply each snippet in turn.
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lz4, require_zstandard


# Fixes vs. the previous version: several functions declared the same mangled
# name for every parameter (a SyntaxError) and referenced fixture names that
# were never bound; parametrize lists and locals are restored.


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
60
0
'''simple docstring''' import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class a__ : _SCREAMING_SNAKE_CASE : Any = None def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) _lowercase : List[Any] = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , _UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Optional[int] = os.path.join(_UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(_UpperCamelCase ) _lowercase : str = self.feature_extraction_class.from_json_file(_UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : str = feat_extract_first.save_pretrained(_UpperCamelCase )[0] check_json_file_has_correct_format(_UpperCamelCase ) _lowercase : Dict = self.feature_extraction_class.from_pretrained(_UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Tuple = self.feature_extraction_class() self.assertIsNotNone(_UpperCamelCase )
245
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class __lowerCAmelCase ( _a ): lowerCamelCase_ : Optional[Any] = '''encoder-decoder''' lowerCamelCase_ : Optional[Any] = True def __init__(self , **__magic_name__ ) -> Optional[int]: '''simple docstring''' super().__init__(**__magic_name__ ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case_ : Any = kwargs.pop('''encoder''' ) snake_case_ : Tuple = encoder_config.pop('''model_type''' ) snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' ) snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ ) snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ ) snake_case_ : Any = True @classmethod def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig: '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case_ : Tuple = True snake_case_ : Optional[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : str = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.encoder.to_dict() snake_case_ : Dict = self.decoder.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
60
0
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCAmelCase ( _a ): """simple docstring""" def __init__( self : Tuple , A_ : List[str] , A_ : Any=13 , A_ : List[str]=7 , A_ : Tuple=True , A_ : Dict=True , A_ : Any=False , A_ : Optional[int]=True , A_ : Tuple=99 , A_ : List[Any]=32 , A_ : Tuple=5 , A_ : Optional[int]=4 , A_ : Optional[int]=37 , A_ : Any="gelu" , A_ : Optional[int]=0.1 , A_ : Dict=0.1 , A_ : List[Any]=5_12 , A_ : str=16 , A_ : Optional[int]=2 , A_ : Dict=0.02 , A_ : List[str]=3 , A_ : int=4 , A_ : Optional[int]=None , )-> Dict: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope def A ( self : Optional[Any] )-> 
str: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Any )-> Optional[int]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A ( self : List[Any] , A_ : Union[str, Any] , A_ : int , A_ : Dict , A_ : str , A_ : Union[str, Any] , A_ : Any )-> Optional[Any]: __UpperCamelCase = DistilBertModel(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase = model(A_ , A_ ) __UpperCamelCase = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , A_ : Optional[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : List[str] , A_ : int , A_ : List[Any] )-> Any: __UpperCamelCase = DistilBertForMaskedLM(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[str] , A_ : List[Any] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Union[str, Any] , 
A_ : Any )-> int: __UpperCamelCase = DistilBertForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase = model( A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : str , A_ : int , A_ : Any , A_ : List[str] , A_ : List[Any] , A_ : Dict , A_ : Optional[Any] )-> Any: __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , A_ : List[str] , A_ : Union[str, Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : Optional[int] )-> str: __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForTokenClassification(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Any , A_ : Union[str, Any] , A_ : Dict , A_ : List[Any] , A_ : List[Any] , A_ : Dict , A_ : str )-> List[str]: __UpperCamelCase = self.num_choices __UpperCamelCase = DistilBertForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = model( A_ , attention_mask=A_ , labels=A_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Dict )-> Union[str, Any]: __UpperCamelCase = self.prepare_config_and_inputs() (__UpperCamelCase) = config_and_inputs __UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return 
config, inputs_dict @require_torch class __UpperCAmelCase ( _a , _a , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _snake_case : str = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Optional[Any] = True _snake_case : List[Any] = True _snake_case : int = True _snake_case : str = True def A ( self : Union[str, Any] )-> Optional[int]: __UpperCamelCase = DistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=A_ , dim=37 ) def A ( self : Tuple )-> int: self.config_tester.run_common_tests() def A ( self : List[str] )-> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*A_ ) def A ( self : int )-> Any: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*A_ ) def A ( self : Any )-> List[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*A_ ) def A ( self : Dict )-> Optional[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*A_ ) def A ( self : Union[str, Any] )-> List[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*A_ ) def A ( self : List[Any] )-> 
List[str]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*A_ ) @slow def A ( self : List[str] )-> Tuple: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = DistilBertModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @slow @require_torch_gpu def A ( self : Optional[Any] )-> Dict: __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __UpperCamelCase = True __UpperCamelCase = model_class(config=A_ ) __UpperCamelCase = self._prepare_for_class(A_ , A_ ) __UpperCamelCase = torch.jit.trace( A_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A_ , os.path.join(A_ , "traced_model.pt" ) ) __UpperCamelCase = torch.jit.load(os.path.join(A_ , "traced_model.pt" ) , map_location=A_ ) loaded(inputs_dict["input_ids"].to(A_ ) , inputs_dict["attention_mask"].to(A_ ) ) @require_torch class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def A ( self : int )-> Tuple: __UpperCamelCase = DistilBertModel.from_pretrained("distilbert-base-uncased" ) __UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase = model(A_ , attention_mask=A_ )[0] __UpperCamelCase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , A_ ) __UpperCamelCase = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) )
505
"""RAG-style dual tokenizer: pairs a question-encoder tokenizer with a
generator tokenizer and dispatches through a "current tokenizer" pointer.

NOTE(review): several signatures below reuse the placeholder parameter name
`__magic_name__` for every argument (duplicate parameter names — a
SyntaxError) and bodies read names such as `question_encoder`,
`save_directory`, `config`, `max_length`, `tgt_texts`, `model_inputs`,
`labels` that those placeholders replaced. Obfuscation damage from the
upstream original; flagged, not guessed at.
"""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


lowerCAmelCase_ = logging.get_logger(__name__)


class __lowerCAmelCase :
    """Holds question-encoder and generator tokenizers; `__call__` routes to
    whichever is current (the question encoder by default)."""

    def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
        # Expected args: (question_encoder, generator) — see class NOTE.
        snake_case_ : Optional[int] = question_encoder
        snake_case_ : Optional[int] = generator
        snake_case_ : Optional[Any] = self.question_encoder

    def lowerCamelCase (self , __magic_name__ ) -> Dict:
        # Save both sub-tokenizers into dedicated sub-directories.
        if os.path.isfile(__magic_name__ ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
        snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
        snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(__magic_name__ )
        self.generator.save_pretrained(__magic_name__ )

    @classmethod
    def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
        # Build both sub-tokenizers from a pretrained RAG checkpoint, reading
        # the sub-configs from a RagConfig unless one arrives via kwargs.
        from ..auto.tokenization_auto import AutoTokenizer

        snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
        if config is None:
            snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
        snake_case_ : Dict = AutoTokenizer.from_pretrained(
            __magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        snake_case_ : Dict = AutoTokenizer.from_pretrained(
            __magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=__magic_name__ , generator=__magic_name__ )

    def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
        # Delegate encoding to the currently selected tokenizer.
        return self.current_tokenizer(*__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
        # Batch-decode with the generator tokenizer.
        return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
        # Decode a single sequence with the generator tokenizer.
        return self.generator.decode(*__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self ) -> Union[str, Any]:
        # Switch dispatch to the question-encoder tokenizer.
        snake_case_ : Any = self.question_encoder

    def lowerCamelCase (self ) -> Dict:
        # Switch dispatch to the generator tokenizer.
        snake_case_ : Dict = self.generator

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
        # Deprecated seq2seq helper: encode sources with the current tokenizer
        # and, when target texts are given, encode them and attach as labels.
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , __magic_name__ , )
        if max_length is None:
            snake_case_ : Dict = self.current_tokenizer.model_max_length
        snake_case_ : List[str] = self(
            __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
        snake_case_ : Union[str, Any] = self(
            text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
        snake_case_ : str = labels['''input_ids''']
        return model_inputs
60
0
"""Fast (tokenizers-backed) BLOOM tokenizer.

NOTE(review): three successive module-level bindings all target the single
name `a_` — the logger, the vocab-file-names map and the pretrained map —
each clobbering the previous (obfuscation damage); the class attributes
below still read `VOCAB_FILES_NAMES` / `PRETRAINED_VOCAB_FILES_MAP`, which
those bindings replaced.
"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

a_ = logging.get_logger(__name__)

a_ = {'''tokenizer_file''': '''tokenizer.json'''}

a_ = {
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}


class lowercase__ ( _a ):
    """BLOOM fast tokenizer (base class arrives via the `_a` alias —
    presumably PreTrainedTokenizerFast; confirm upstream).

    NOTE(review): `__init__` and the methods below reuse the placeholder
    parameter name `__UpperCAmelCase` for every argument (duplicate parameter
    names — a SyntaxError), locals are repeatedly rebound to
    `lowerCAmelCase__`, and bodies read names the placeholders replaced
    (`pre_tok_state`, `add_prefix_space`, `is_split_into_words`,
    `conversation`, `input_ids`). Flagged, not guessed at. The four methods
    also share one obfuscated name, so only the last survives on the class.
    """

    a_ =VOCAB_FILES_NAMES
    a_ =PRETRAINED_VOCAB_FILES_MAP
    a_ =['''input_ids''', '''attention_mask''']
    a_ =None

    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , )-> Optional[Any]:
        super().__init__(
            __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , **__UpperCAmelCase , )
        # Re-sync the backend pre-tokenizer's add_prefix_space with ours by
        # rebuilding it from its serialized state when they disagree.
        lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase ) != add_prefix_space:
            lowerCAmelCase__ = getattr(__UpperCAmelCase , pre_tok_state.pop("type" ) )
            lowerCAmelCase__ = add_prefix_space
            lowerCAmelCase__ = pre_tok_class(**__UpperCAmelCase )
        lowerCAmelCase__ = add_prefix_space

    def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> BatchEncoding:
        # Batch encode; pretokenized input requires add_prefix_space=True.
        lowerCAmelCase__ = kwargs.get("is_split_into_words" , __UpperCAmelCase )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs." )
        return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )

    def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> BatchEncoding:
        # Single-sequence encode; same pretokenized-input guard.
        lowerCAmelCase__ = kwargs.get("is_split_into_words" , __UpperCAmelCase )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs." )
        return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
        # Serialize the backend model files into the given directory.
        lowerCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
        return tuple(__UpperCAmelCase )

    def UpperCAmelCase ( self , __UpperCAmelCase )-> List[int]:
        # Flatten a Conversation into token ids, appending EOS after each
        # turn and truncating to model_max_length from the left.
        lowerCAmelCase__ = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
        if len(__UpperCAmelCase ) > self.model_max_length:
            lowerCAmelCase__ = input_ids[-self.model_max_length :]
        return input_ids
339
"""Test module for ViTMSN (model tester, common-test suite, integration test).

NOTE(review): pervasive obfuscation damage — locals repeatedly rebound to
`snake_case_`, duplicate `__magic_name__` parameter names (a SyntaxError in
`__init__` below), method/class names collapsed so later definitions clobber
earlier ones, and bodies reading names the placeholders replaced (`model`,
`result`, `image`, `torch_device`). Flagged inline, not guessed at.
"""
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class __lowerCAmelCase :
    """Builds small ViTMSN configs/inputs and runs shape assertions.

    NOTE(review): `__init__` declares every parameter as `__magic_name__`
    (duplicate names — SyntaxError); the intended names are visible on the
    right-hand sides of the assignments below, and later methods read them
    as `self.batch_size`, `self.image_size`, etc.
    """

    def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]:
        snake_case_ : List[str] = parent
        snake_case_ : Optional[Any] = batch_size
        snake_case_ : List[Any] = image_size
        snake_case_ : Optional[int] = patch_size
        snake_case_ : Optional[Any] = num_channels
        snake_case_ : Optional[Any] = is_training
        snake_case_ : List[Any] = use_labels
        snake_case_ : Optional[int] = hidden_size
        snake_case_ : Optional[Any] = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : Optional[Any] = intermediate_size
        snake_case_ : Any = hidden_act
        snake_case_ : List[str] = hidden_dropout_prob
        snake_case_ : Dict = attention_probs_dropout_prob
        snake_case_ : List[str] = type_sequence_label_size
        snake_case_ : Union[str, Any] = initializer_range
        snake_case_ : List[Any] = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case_ : Any = (image_size // patch_size) ** 2
        snake_case_ : int = num_patches + 1

    def lowerCamelCase (self ) -> Optional[Any]:
        # Random pixel values plus (optionally) random classification labels.
        snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : List[Any] = None
        if self.use_labels:
            snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case_ : int = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase (self ) -> Tuple:
        # Mirror the tester's hyper-parameters into a ViTMSNConfig.
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
        # Base model forward: last_hidden_state is (batch, seq_len, hidden).
        snake_case_ : int = ViTMSNModel(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        snake_case_ : List[str] = model(__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
        # Classification head: logits are (batch, num_labels); also re-checks
        # with 1-channel (greyscale) input.
        snake_case_ : int = self.type_sequence_label_size
        snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ )
        # NOTE(review): the two prints below contain {...} placeholders but no
        # f-prefix — they print the braces literally; likely intended as
        # f-strings upstream.
        print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
        print('''Labels: {labels}''' )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case_ : Optional[int] = 1
        snake_case_ : List[str] = ViTMSNForImageClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case_ : Any = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCamelCase (self ) -> Dict:
        # Repackage prepared inputs into the common-test dict format.
        snake_case_ : Any = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
        snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
    """Common-model + pipeline test suite for ViTMSN.

    NOTE(review): reuses the tester's obfuscated class name, clobbering it at
    module scope, and later reads `ViTMSNModelTester` — the tester's
    pre-obfuscation name.
    """

    lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    lowerCamelCase_ : Optional[int] = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCamelCase_ : int = False
    lowerCamelCase_ : Optional[int] = False
    lowerCamelCase_ : int = False
    lowerCamelCase_ : Optional[int] = False

    def lowerCamelCase (self ) -> str:
        # Fixture setup (results discarded — obfuscation; see class NOTE).
        snake_case_ : List[Any] = ViTMSNModelTester(self )
        snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )

    def lowerCamelCase (self ) -> Union[str, Any]:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
    def lowerCamelCase (self ) -> Optional[Any]:
        pass

    def lowerCamelCase (self ) -> int:
        # Input embeddings exist; output embeddings are None or Linear.
        snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Any = model_class(__magic_name__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case_ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )

    def lowerCamelCase (self ) -> int:
        # forward() must take pixel_values as its first argument.
        snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Tuple = model_class(__magic_name__ )
            snake_case_ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Optional[int] = [*signature.parameters.keys()]
            snake_case_ : List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __magic_name__ )

    def lowerCamelCase (self ) -> Dict:
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    def lowerCamelCase (self ) -> List[str]:
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__magic_name__ )

    @slow
    def lowerCamelCase (self ) -> Any:
        # Smoke-test loading the first published checkpoint.
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )


def lowerCamelCase_ ( ) -> Optional[Any]:
    """Load the standard COCO cats test fixture image.

    NOTE(review): returns `image`, a name the obfuscation replaced — the
    Image.open result above was presumably bound to it.
    """
    snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test pinning logits of facebook/vit-msn-small."""

    @cached_property
    def lowerCamelCase (self ) -> Union[str, Any]:
        return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None

    @slow
    def lowerCamelCase (self ) -> Any:
        torch.manual_seed(2 )
        snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
        snake_case_ : str = self.default_image_processor
        snake_case_ : str = prepare_img()
        snake_case_ : int = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Optional[int] = model(**__magic_name__ )
        # verify the logits
        snake_case_ : Optional[int] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __magic_name__ )
        # Pinned first three logits; tolerance 1e-4.
        snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
60
0
"""Unit tests for backbone out_features / out_indices helper utilities.

NOTE(review): locals are repeatedly rebound to `__A` and many assertions read
`lowercase__` / `backbone` — names that only existed before obfuscation
(likely the unpacked `out_features, out_indices` pair and the BackboneMixin
instance). The three methods also share one obfuscated name, so only the
last survives on the class. Flagged, not guessed at.
"""
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class lowerCAmelCase__ ( unittest.TestCase ):
    """Exercises alignment, validation and mixin-property behavior."""

    def __UpperCamelCase ( self ):
        # get_aligned_output_features_output_indices: defaulting, explicit
        # features, explicit indices, and negative indices.
        __A =['''a''', '''b''', '''c''']
        # Defaults to last layer if both are None
        __A =get_aligned_output_features_output_indices(lowercase__ , lowercase__ , lowercase__ )
        self.assertEqual(lowercase__ , ['''c'''] )
        self.assertEqual(lowercase__ , [2] )
        # Out indices set to match out features
        __A =get_aligned_output_features_output_indices(['''a''', '''c'''] , lowercase__ , lowercase__ )
        self.assertEqual(lowercase__ , ['''a''', '''c'''] )
        self.assertEqual(lowercase__ , [0, 2] )
        # Out features set to match out indices
        __A =get_aligned_output_features_output_indices(lowercase__ , [0, 2] , lowercase__ )
        self.assertEqual(lowercase__ , ['''a''', '''c'''] )
        self.assertEqual(lowercase__ , [0, 2] )
        # Out features selected from negative indices
        __A =get_aligned_output_features_output_indices(lowercase__ , [-3, -1] , lowercase__ )
        self.assertEqual(lowercase__ , ['''a''', '''c'''] )
        self.assertEqual(lowercase__ , [-3, -1] )

    def __UpperCamelCase ( self ):
        # verify_out_features_out_indices must reject each malformed input
        # and accept the final valid combination.
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , lowercase__ )
        # Out features must be a list
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
        # Out features must be a subset of stage names
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
        # Out indices must be a list or tuple
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(lowercase__ , 0 , ['''a''', '''b'''] )
        # Out indices must be a subset of stage names
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(lowercase__ , (0, 1) , ['''a'''] )
        # Out features and out indices must be the same length
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
        # Out features should match out indices
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
        # Out features and out indices should be in order
        with self.assertRaises(lowercase__ ):
            verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
        # Check passes with valid inputs
        verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )

    def __UpperCamelCase ( self ):
        # BackboneMixin: out_features / out_indices must stay in sync when
        # either side is reassigned (assignment targets lost — see NOTE).
        __A =BackboneMixin()
        __A =['''a''', '''b''', '''c''']
        __A =['''a''', '''c''']
        __A =[0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        __A =['''a''', '''b''']
        self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        __A =[-3, -1]
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
184
"""EfficientNet model configuration (+ ONNX export config)."""
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# NOTE(review): the obfuscated original bound both the logger and the archive
# map to the single name `lowerCAmelCase_`, clobbering the logger. The final
# value of `lowerCAmelCase_` (the map) is preserved; the logger gets its own
# name so `logging.get_logger` is no longer discarded.
logger = logging.get_logger(__name__)

lowerCAmelCase_ = {
    '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}


class __lowerCAmelCase ( _a ):
    """Configuration holding EfficientNet architecture hyper-parameters.

    The base class arrives via the `_a` alias — presumably
    ``PretrainedConfig``; confirm upstream.

    NOTE(review): the obfuscated original declared every ``__init__``
    parameter as ``__magic_name__`` (duplicate parameter names — a
    SyntaxError) and bound every value to the throwaway local
    ``snake_case_``, so nothing was stored. Parameter names are restored
    from the right-hand sides of those bindings, and each value is stored
    on ``self`` under the same name.
    """

    # Model-type identifier string.
    lowerCamelCase_ : List[Any] = '''efficientnet'''

    def __init__(
        self ,
        num_channels = 3 ,
        image_size = 600 ,
        width_coefficient = 2.0 ,
        depth_coefficient = 3.1 ,
        depth_divisor = 8 ,
        kernel_sizes = [3, 3, 5, 3, 5, 5, 3] ,
        in_channels = [32, 16, 24, 40, 80, 112, 192] ,
        out_channels = [16, 24, 40, 80, 112, 192, 320] ,
        depthwise_padding = [] ,
        strides = [1, 2, 2, 2, 1, 2, 1] ,
        num_block_repeats = [1, 2, 2, 3, 3, 4, 1] ,
        expand_ratios = [1, 6, 6, 6, 6, 6, 6] ,
        squeeze_expansion_ratio = 0.25 ,
        hidden_act = "swish" ,
        hidden_dim = 2560 ,
        pooling_type = "mean" ,
        initializer_range = 0.02 ,
        batch_norm_eps = 0.001 ,
        batch_norm_momentum = 0.99 ,
        dropout_rate = 0.5 ,
        drop_connect_rate = 0.2 ,
        **kwargs ,
    ) -> Union[str, Any]:
        """Store every architecture hyper-parameter on the instance.

        NOTE(review): the list defaults are shared mutable objects — kept
        verbatim from the original so the public defaults do not change;
        callers should pass fresh lists when mutating.
        """
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Original computed `sum(<num_block_repeats>) * 4` and discarded it;
        # attribute name inferred from upstream convention — TODO confirm.
        self.num_hidden_layers = sum(num_block_repeats ) * 4


class __lowerCAmelCase ( _a ):
    """ONNX export configuration (base via `_a`, presumably ``OnnxConfig``).

    NOTE(review): reuses the class name above (obfuscation artifact) — at
    module scope this binding clobbers the config class, exactly as the
    original did. Both properties also share the obfuscated name
    ``lowerCamelCase`` so only the second is reachable; kept as-is rather
    than guessing the upstream property names.
    """

    # Minimum torch/ONNX opset compatibility marker.
    lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' )

    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        # NCHW image input description for the ONNX exporter.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def lowerCamelCase (self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-5
60
0
"""simple docstring""" from sklearn.metrics import matthews_corrcoef import datasets a : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ a : int = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... 
sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 """ a : Union[str, Any] = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCAmelCase( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ] , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' return { "matthews_correlation": float(matthews_corrcoef(snake_case__ , snake_case__ , sample_weight=snake_case__ ) ), }
218
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowerCAmelCase_ = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int) lowerCAmelCase_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowerCAmelCase_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowerCAmelCase_ = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase_ = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Union[str, Any] = { """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""], """processing_speech_to_text""": ["""Speech2TextProcessor"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = ["""Speech2TextTokenizer"""] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = ["""Speech2TextFeatureExtractor"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSpeech2TextForConditionalGeneration""", """TFSpeech2TextModel""", """TFSpeech2TextPreTrainedModel""", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Speech2TextForConditionalGeneration""", """Speech2TextModel""", """Speech2TextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor 
try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
297
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on HF `tokenizers`.

    Reserves ids 0/1/2 for pad/eos/unk, normalizes NMT + NFKC + lowercasing,
    pre-tokenizes on metaspace/digits/punctuation, and appends the EOS token
    to every encoded sequence via a TemplateProcessing post-processor.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        # Fixed id layout: pad=0, eos=1, unk=2 (trainer must keep this order).
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        # Flatten to a list indexed by id, as expected by UnigramTrainer.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                # Collapse runs of 2+ spaces to a single space.
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        # Always append EOS to a single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on the given file(s), then register the unk id."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model from an in-memory iterator, then register the unk id."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the Unigram model knows its unk id.

        The trainer does not set `unk_id`; we round-trip through JSON to inject it.
        """
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
60
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class UpperCAmelCase__(PreTrainedTokenizer):
    """Reformer tokenizer backed by a SentencePiece model (`spiece.model`)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Keep the kwargs so the SentencePiece processor can be rebuilt after unpickling.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and rebuild in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        # NOTE(review): `token` is unbound when index >= vocab size — presumably callers
        # guarantee in-range ids; confirm before relying on out-of-range behaviour.
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model into `save_directory` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The loaded model file is gone; serialize the in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
206
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    """Return True if `graph` is bipartite, else False.

    `graph` is an adjacency list keyed by the integers 0 .. len(graph)-1.
    Each connected component is 2-colored with a DFS, then every edge is
    checked for a color conflict.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c, then alternate the color for its neighbors.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component (the graph may be disconnected).
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # A graph is bipartite iff no edge joins two same-colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Backward-compatible alias for the previous (generated) name.
lowerCamelCase_ = check_bipartite_dfs

if __name__ == "__main__":
    # Adjacency list of graph
    graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
    print(check_bipartite_dfs(graph))
60
0
"""simple docstring""" def A__ ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 0 while len(_UpperCamelCase ) > 1: _lowerCAmelCase = 0 # Consider two files with minimum cost to be merged for _ in range(2 ): _lowerCAmelCase = files.index(min(_UpperCamelCase ) ) temp += files[min_index] files.pop(_UpperCamelCase ) files.append(_UpperCamelCase ) optimal_merge_cost += temp return optimal_merge_cost if __name__ == "__main__": import doctest doctest.testmod()
589
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a BeitImageProcessor in the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Return one (image, segmentation map) pair from the ADE20k fixtures dataset."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    """Return a batch of two (image, segmentation map) pairs from the ADE20k fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    imagea = Image.open(ds[0]["file"])
    imageb = Image.open(ds[1]["file"])
    mapa = Image.open(ds[2]["file"])
    mapb = Image.open(ds[3]["file"])

    return [imagea, imageb], [mapa, mapb]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        # Overrides passed to from_dict must win over the serialized values.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With reduce_labels the background is remapped, so 255 becomes a valid value.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
60
0
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


# NOTE(review): these constant names were lost to mangling; the values match the
# upstream hub-test fixtures — confirm the names against the original file if
# anything starts referencing them.
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"  # Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"  # One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"  # This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"  # Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"


@contextlib.contextmanager
def context_en():
    """Wrap the managed block with an English greeting/goodbye on stdout."""
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    """Wrap the managed block with a French greeting/goodbye on stdout."""
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib.reload(transformers) would fail.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels must also work on user subclasses.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models take labels as an argument, not a model input, so find_labels is empty.
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
348
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = '''\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
'''

_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'mse\': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {\'mse\': 0.6123724356957945}

    If you\'re using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'mse\': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {\'mse\': array([0.41666667, 1. ])}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    """Mean-squared-error metric, a thin wrapper over sklearn's mean_squared_error."""

    # `_info` and `_compute` are the names the datasets.Metric API dispatches on;
    # they must not be renamed.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config accepts one sequence of floats per sample.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        # sklearn's signature is (y_true, y_pred, ...): references first.
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
60
0
"""simple docstring""" from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> dict: _lowerCAmelCase : Tuple = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty" return requests.get(_UpperCamelCase ).json() def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 10 ) -> list[dict]: _lowerCAmelCase : Any = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' _lowerCAmelCase : Tuple = requests.get(_UpperCamelCase ).json()[:max_stories] return [get_hackernews_story(_UpperCamelCase ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 10 ) -> str: _lowerCAmelCase : int = hackernews_top_stories(_UpperCamelCase ) return "\n".join("""* [{title}]({url})""".format(**_UpperCamelCase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
213
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class __lowerCAmelCase:
    """Mixin that checks a feature extractor round-trips through its serialization paths.

    Consumers must be a unittest.TestCase and define `feature_extraction_class`
    and `feat_extract_dict`.
    """

    # NOTE(review): attribute name lost to mangling (presumably `test_cast_dtype`);
    # it is unused within this block, so the value is kept as-is.
    lowerCamelCase_ = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        # Every configured field must survive the JSON round trip unchanged.
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            # save_pretrained returns the list of written files; [0] is the config JSON.
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        # The extractor must be constructible with all-default parameters.
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
60
0
import numpy # List of input, output pairs a_ = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) a_ = (((515, 22, 13), 555), ((61, 35, 49), 150)) a_ = [2, 4, 1, 5] a_ = len(train_data) a_ = 0.0_09 def a__ ( _UpperCamelCase : int ,_UpperCamelCase : Union[str, Any]="train" ): return calculate_hypothesis_value(_UpperCamelCase ,_UpperCamelCase ) - output( _UpperCamelCase ,_UpperCamelCase ) def a__ ( _UpperCamelCase : Optional[int] ): __lowerCamelCase = 0 for i in range(len(_UpperCamelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Union[str, Any] ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Optional[int] ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : str=m ): __lowerCamelCase = 0 for i in range(_UpperCamelCase ): if index == -1: summation_value += _error(_UpperCamelCase ) else: summation_value += _error(_UpperCamelCase ) * train_data[i][0][index] return summation_value def a__ ( _UpperCamelCase : Optional[int] ): __lowerCamelCase = summation_of_cost_derivative(_UpperCamelCase ,_UpperCamelCase ) / m return cost_derivative_value def a__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output __lowerCamelCase = 0.000_002 __lowerCamelCase = 0 __lowerCamelCase = 0 while True: j += 1 __lowerCamelCase = [0, 0, 0, 0] for i in range(0 ,len(_UpperCamelCase ) ): __lowerCamelCase = get_cost_derivative(i - 1 ) __lowerCamelCase = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( _UpperCamelCase ,_UpperCamelCase ,atol=_UpperCamelCase 
,rtol=_UpperCamelCase ,): break __lowerCamelCase = temp_parameter_vector print(('''Number of iterations:''', j) ) def a__ ( ): for i in range(len(_UpperCamelCase ) ): print(('''Actual output value:''', output(_UpperCamelCase ,'''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(_UpperCamelCase ,'''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
175
"""Registry of hyperparameter-search backends and default-backend selection."""
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Abstract interface a hyperparameter-search backend must implement."""

    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Run the search for `n_trials` trials, optimizing in `direction`."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default hyperparameter space for one trial."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise a helpful RuntimeError when the backend is not installed."""
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        """Return the pip command that installs this backend."""
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    # The extra is quoted so the generated pip command works in a shell.
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    """Return the name of the first installed backend; raise when none is available."""
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
60
0
"""Stable Diffusion inference script optimized for Intel CPUs via IPEX (bfloat16)."""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last — NHWC layout is faster on CPU
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex; a sample input lets IPEX trace better, but is optional
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
245
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list: """simple docstring""" snake_case_ : Tuple = len(_UpperCamelCase ) snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): snake_case_ : Any = y_points[i] for i in range(2 , _UpperCamelCase ): for j in range(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Optional[int] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
60
0
"""simple docstring""" import numpy as np def lowercase (_snake_case ) -> np.array: '''simple docstring''' return 1 / (1 + np.exp(-vector )) def lowercase (_snake_case ) -> np.array: '''simple docstring''' return vector * sigmoid(1.7_0_2 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
505
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
0
"""Mosaic data augmentation for YOLO-format datasets (4 images stitched into one)."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Generate NUMBER_IMAGES mosaic images plus remapped YOLO label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            # Convert corner coordinates back to YOLO (center, size) format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    """Read YOLO label files from `label_dir` and pair them with images in `img_dir`.

    Returns (image paths, per-image lists of [class, xmin, ymin, xmax, ymax]).
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # YOLO stores (center, size); convert to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Stitch the 4 chosen images into one mosaic and remap their annotations.

    Returns (mosaic image, remapped annotations, path of the first source image).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char):
    """Return a random string of lowercase letters and digits of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
339
"""Property tests comparing HashMap behavior against a built-in dict."""
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    """Build a (getitem, key) operation tuple."""
    return getitem, k


def _set(k, v):
    """Build a (setitem, key, value) operation tuple."""
    return setitem, k, v


def _del(k):
    """Build a (delitem, key) operation tuple."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply `fun` to `obj`; return (result, None) or (None, raised exception)."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Apply the same operation sequence to HashMap and dict; states must match."""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        # Exceptions are compared by message; types may legitimately differ.
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    """HashMap must not expose public names beyond the dict API."""

    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
60
0
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCAmelCase__ ( _a ): '''simple docstring''' lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
184
from __future__ import annotations def lowerCamelCase_ ( _UpperCamelCase ) -> list: """simple docstring""" if len(_UpperCamelCase ) == 0: return [] snake_case_ , snake_case_ : Dict = min(_UpperCamelCase ), max(_UpperCamelCase ) snake_case_ : List[str] = int(max_value - min_value ) + 1 snake_case_ : list[list] = [[] for _ in range(_UpperCamelCase )] for i in my_list: buckets[int(i - min_value )].append(_UpperCamelCase ) return [v for bucket in buckets for v in sorted(_UpperCamelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
60
0
"""Pix2Struct model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping a composite pix2struct config if given one."""
        cls._set_token_in_kwargs(kwargs)

        # get_config_dict returns (config_dict, remaining kwargs) — unpack both.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping a composite pix2struct config if given one."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration holding a Pix2Struct text and vision sub-config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Mirror the text config's special token ids at the top level.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep the sub-configs' initializer range in sync with the composite one.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
218
"""Adaptive softmax (with sample mask) for TF Transformer-XL."""
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax over a vocabulary split into frequency-ordered clusters."""

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        """Create cluster, projection, and per-cluster output weights."""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            # A single shared embedding size; one big weight sliced per cluster.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            # Embedding size shrinks by div_val per cluster; separate weights each.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        """Optionally project `x`, then compute logits against weight `W` + bias `b`."""
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """Pick the log-probability of each target token from a 2-D logprob matrix."""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            # No clustering: plain softmax over the full vocabulary.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    # Head cluster: shortlist tokens plus one logit per tail cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter each cluster's negative log-likelihood back to its positions.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
60
0
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Smoke tests for the ``accelerate launch`` / ``accelerate test`` CLI.

    NOTE(review): in the collapsed source every class attribute had been
    renamed to the placeholder ``a_`` while the method bodies still referenced
    the real names (``mod_file``, ``self.base_cmd``, ...), and every method was
    named ``_lowercase`` so only the last one survived in the class dict.  The
    names below are restored from those in-body references.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    # Script that `accelerate launch` executes in these tests.
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    # Temporary name used to park an existing user config during the test run.
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Move any existing user config out of the way so launches use defaults.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's original config file.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        """`accelerate launch` without a config file should succeed."""
        # Copy so appending "--multi_gpu" does not mutate the shared class attribute.
        cmd = list(self.base_cmd)
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        """Every checked-in config file should launch successfully."""
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path],
                    env=os.environ.copy(),
                )

    def test_accelerate_test(self):
        """`accelerate test` should run to completion."""
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for the ``accelerate tpu-config`` command (``--debug`` prints the
    gcloud command instead of executing it, which these tests assert on).

    NOTE(review): this second class intentionally keeps the original
    (duplicate) class name; it shadows the first class at module level exactly
    as the source did.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    # Working directory prefix prepended to every generated remote command.
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''',
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''',
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''',
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''',
            output,
        )
297
import requests def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None: """simple docstring""" snake_case_ : Tuple = {'''Content-Type''': '''application/json'''} snake_case_ : Any = requests.post(_UpperCamelCase , json={'''text''': message_body} , headers=_UpperCamelCase ) if response.status_code != 200: snake_case_ : List[Any] = ( '''Request to slack returned an error ''' f'''{response.status_code}, the response is:\n{response.text}''' ) raise ValueError(_UpperCamelCase ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
60
0