repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
transformers | transformers-main/utils/check_config_docstrings.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint mentioned in `config_class`'s source whose Markdown
    link points at `https://huggingface.co/<checkpoint>`, or `None` if there is none."""
    # Each regex hit is a (name, link) pair, e.g.
    # ('bert-base-uncased', 'https://huggingface.co/bert-base-uncased').
    source = inspect.getsource(config_class)
    for name, link in _re_checkpoint.findall(source):
        # Allow the link to end with a single trailing `/`.
        if link.endswith("/"):
            link = link[:-1]
        # The checkpoint name is valid only if the link points at its own Hub page.
        if link == f"https://huggingface.co/{name}":
            return name
    return None
def check_config_docstrings_have_checkpoints():
    """Raise ValueError if any non-deprecated, non-ignored config class docstring lacks
    a valid checkpoint link."""
    missing = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        class_name = config_class.__name__
        if checkpoint is None and class_name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            missing.append(class_name)
    if missing:
        listing = "\n".join(sorted(missing))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{listing}")
# Entry point: fail the CI job if any config docstring is missing its checkpoint link.
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 3,279 | 34.268817 | 105 | py |
transformers | transformers-main/utils/notification_service_doc_tests.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse a pytest summary line and return `(num_failed, num_passed, time_spent)`."""
    tokens = test_results.split(" ")
    n_failed = 0
    n_passed = 0
    # When the output is short enough, pytest wraps it in `=` signs ("== OUTPUT ==");
    # in that case the duration is the second-to-last token, otherwise the last one.
    duration = tokens[-2] if "=" in tokens[-1] else tokens[-1]
    # Counts precede their labels, e.g. "3 failed, 5 passed".
    for idx, token in enumerate(tokens):
        if "failed" in token:
            n_failed += int(tokens[idx - 1])
        if "passed" in token:
            n_passed += int(tokens[idx - 1])
    return n_failed, n_passed, duration
def extract_first_line_failure(failures_short_lines):
    """Map each doctest file in pytest's short-failure report to the first line of its
    error message."""
    first_lines = {}
    current_file = None
    capturing = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            # Section header of a doctest failure; the third token is the file name.
            capturing = True
            current_file = line.split(" ")[2]
        elif capturing and not line.split(" ")[0].isdigit():
            # First non-line-numbered line after the header is the error summary.
            first_lines[current_file] = line
            capturing = False
    return first_lines
class Message:
    """Builds and posts the Slack report for a doc-test CI run.

    `doc_test_results` is expected to hold the global keys "success" (int),
    "failures" (int), "time_spent" (str) and "job_link", plus one dict per test
    category with "failed" (list of failing test names) and "failures" (mapping
    test name -> first error line). Note the top-level "failures" is a *count*
    while the per-category "failures" is a dict.
    """

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        # "time_spent" looks like "<duration>, "; keep only the duration part.
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        """Total run time formatted as "XhYmZs"."""
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        """Slack header block showing the report title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        """Slack section block used when every test passed."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        """Slack section block summarizing the failure/total counts."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        """Slack section block listing the failing tests, grouped by category."""
        line_length = 40
        # BUG FIX: read from `self.doc_test_results` instead of the module-level
        # `doc_test_results` global, so the property works on any instance.
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        """JSON-serialized list of Slack blocks for the main message."""
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic "the run itself broke" message to Slack."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        # BUG FIX: `payload` is already a Python object; the previous
        # `json.loads(payload)` would raise TypeError (loads expects a string).
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the main report message and remember its thread timestamp for replies."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details.

        `failures` maps test names to error messages; long messages are truncated.
        """
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per job that had failures.

        NOTE: destructively pops the global keys from `self.doc_test_results` so only
        per-category dicts remain. Requires `post()` to have been called first.
        """
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                # CONSISTENCY FIX: count the same "failures" mapping that is tested
                # and rendered below (previously counted the parallel "failed" list).
                text = f"*Num failures* :{len(job_result['failures'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                # Gentle rate limiting between Slack API calls.
                time.sleep(1)
def get_job_links():
    """Return a mapping of job name -> job URL for the current GitHub Actions run.

    The GitHub API pages 100 jobs at a time; any error while reading the paginated
    results is printed and an empty dict is returned.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # Page 1 was fetched above; fetch the remaining pages, if any.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for page in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={page + 2}").json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name: str):
    """Read every file inside directory `name` into a dict keyed by file stem.

    Returns an empty dict if the directory does not exist. Raises ValueError on
    files that cannot be decoded as UTF-8.
    """
    artifact = {}
    if os.path.exists(name):
        for entry in os.listdir(name):
            try:
                with open(os.path.join(name, entry), encoding="utf-8") as f:
                    # Key by the part before the first "." (e.g. "stats.txt" -> "stats").
                    artifact[entry.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, entry)}.") from e
    return artifact
def retrieve_available_artifacts():
    """Scan the current working directory and return {name: Artifact} for every
    sub-directory (each directory is assumed to be one downloaded CI artifact)."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    available_artifacts: Dict[str, Artifact] = {}
    for directory in filter(os.path.isdir, os.listdir()):
        if directory not in available_artifacts:
            available_artifacts[directory] = Artifact(directory)
        available_artifacts[directory].add_path(directory)
    return available_artifacts
# Entry point: collect doc-test artifacts, aggregate pass/fail stats per category,
# then post the report (and per-job replies) to Slack.
if __name__ == "__main__":
github_actions_job_links = get_job_links()
available_artifacts = retrieve_available_artifacts()
# Map file glob -> report category.
docs = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
doc_test_results = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
artifact = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
failed, success, time_spent = handle_test_results(artifact["stats"])
# NOTE: top-level "failures"/"success" are counts, unlike the per-category dicts above.
doc_test_results["failures"] = failed
doc_test_results["success"] = success
# Strip the surrounding characters of the duration token, keep trailing ", " for display.
doc_test_results["time_spent"] = time_spent[1:-1] + ", "
all_failures = extract_first_line_failure(artifact["failures_short"])
# Route each FAILED line of the summary into its category bucket.
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
line = line.replace("FAILED ", "")
line = line.split()[0].replace("\n", "")
if "::" in line:
file_path, test = line.split("::")
else:
file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
failure = all_failures[test] if test in all_failures else "N/A"
doc_test_results[category]["failures"][test] = failure
break
message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 12,860 | 32.755906 | 117 | py |
transformers | transformers-main/utils/custom_init_isort.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the leading whitespace of `line` ("" when the line has no non-space content)."""
    match = _re_indent.search(line)
    return match.groups()[0] if match is not None else ""
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
"""
Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after
`start_prompt` and stops at `end_prompt` (but returns what's before `start_prompt` as a first block and what's
after `end_prompt` as a last block, so `code` is always the same as joining the result of this function).
"""
# Let's split the code into lines and move to start_index.
index = 0
lines = code.split("\n")
if start_prompt is not None:
# Skip ahead to the first line beginning with `start_prompt`; everything before it
# becomes block 0 so joining the result reproduces `code` exactly.
while not lines[index].startswith(start_prompt):
index += 1
blocks = ["\n".join(lines[:index])]
else:
blocks = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
current_block = [lines[index]]
index += 1
while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
# A line at exactly `indent_level` either closes the current block (when the block's
# last line was more deeply indented) or starts a new block.
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
# Closing line of a multi-line block (e.g. a lone "]" or ")"): attach it,
# flush the block, and start the next one with the following line.
current_block.append(lines[index])
blocks.append("\n".join(current_block))
if index < len(lines) - 1:
current_block = [lines[index + 1]]
index += 1
else:
current_block = []
else:
blocks.append("\n".join(current_block))
current_block = [lines[index]]
else:
# Blank or more deeply indented line: it belongs to the current block.
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(current_block) > 0:
blocks.append("\n".join(current_block))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lines):
blocks.append("\n".join(lines[index:]))
return blocks
def ignore_underscore(key):
    """Wrap `key` (object -> str) so comparisons ignore case and underscores."""

    def normalized(obj):
        return key(obj).lower().replace("_", "")

    return normalized
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: constants first, then
    classes, then functions. `key` optionally maps an object to the string used for
    classification and ordering (identity by default)."""

    def identity(x):
        return x

    if key is None:
        key = identity
    # isort buckets: ALL_CAPS constants, Capitalized classes, lowercase functions.
    constants = [obj for obj in objects if key(obj).isupper()]
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    # Within each bucket, order case-insensitively ignoring underscores.
    sort_key = ignore_underscore(key)
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)
def sort_objects_in_import(import_statement):
"""
Return the same `import_statement` but with objects properly sorted.

Handles the three layouts found in `_import_structure` blocks: one name per line,
all names on one middle line, and everything on a single line.
"""
# This inner function sort imports between [ ].
def _replace(match):
imports = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
keys = [part.strip().replace('"', "") for part in imports.split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
keys = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
lines = import_statement.split("\n")
if len(lines) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
idx = 2 if lines[1].strip() == "[" else 1
# Pair each inner line with the quoted name it declares, then reorder the lines.
keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(lines) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1]) is not None:
lines[1] = _re_bracket_content.sub(_replace, lines[1])
else:
keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
return "\n".join(lines)
else:
# Finally we have to deal with imports fitting on one line
import_statement = _re_bracket_content.sub(_replace, import_statement)
return import_statement
def sort_imports(file, check_only=True):
"""
Sort `_import_structure` imports in `file`, `check_only` determines if we only check or overwrite.

Returns True when `check_only` is set and the file would change; returns None otherwise.
"""
with open(file, encoding="utf-8") as f:
code = f.read()
# Nothing to do for files without a delayed-import structure.
if "_import_structure" not in code:
return
# Blocks of indent level 0
main_blocks = split_code_in_indented_blocks(
code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
)
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1, len(main_blocks) - 1):
# Check if the block contains some `_import_structure`s thingy to sort.
block = main_blocks[block_idx]
block_lines = block.split("\n")
# Get to the start of the imports.
line_idx = 0
while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
line_idx = len(block_lines)
else:
line_idx += 1
if line_idx >= len(block_lines):
continue
# Ignore beginning and last line: they don't contain anything.
internal_block_code = "\n".join(block_lines[line_idx:-1])
indent = get_indent(block_lines[1])
# Slit the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
count = 0
reorderded_blocks = []
for i in range(len(internal_blocks)):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i])
else:
# Also sort the object names inside each keyed block.
block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
reorderded_blocks.append(block)
count += 1
# And we put our main block back together with its first and last line.
main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
if code != "\n".join(main_blocks):
if check_only:
return True
else:
print(f"Overwriting {file}.")
with open(file, "w", encoding="utf-8") as f:
f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Check (or rewrite) `_import_structure` ordering in every `__init__.py` under
    the transformers source tree.

    When `check_only` is True, raises ValueError listing how many files would change;
    otherwise `sort_imports` rewrites them in place.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # BUG FIX: accumulate every failing file instead of overwriting the
                # list, so the raised message reports the true count.
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
# Entry point: `--check_only` reports unsorted inits, otherwise they are rewritten.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 10,282 | 39.644269 | 116 | py |
transformers | transformers-main/utils/check_self_hosted_runner.py | import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runners and fail if any target runner is offline.

    Writes the offline runners to `offline_runners.txt` (so they can be reported on
    Slack) and raises ValueError listing their names when at least one is offline.
    """
    offline_runners = []
    # NOTE(review): the token is interpolated into a shell command (shell=True);
    # consider subprocess.run([...], shell=False) with an argument list instead.
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    response = json.loads(output.stdout.decode("utf-8"))
    for runner in response["runners"]:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([runner["name"] for runner in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
# Entry point: parse CLI arguments and check the requested self-hosted runners.
if __name__ == "__main__":
# Converts the comma-separated --target_runners value into a list.
def list_str(values):
return values.split(",")
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 1,538 | 28.037736 | 106 | py |
transformers | transformers-main/utils/update_tiny_models.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script running `create_dummy_models.py` with a pre-defined set of arguments.
This file is intended to be used in a CI workflow file without the need of specifying arguments. It creates and uploads
tiny models for all model classes (if their tiny versions are not on the Hub yet), as well as produces an updated
version of `tests/utils/tiny_model_summary.json`. That updated file should be merged into the `main` branch of
`transformers` so the pipeline testing will use the latest created/updated tiny models.
"""
import argparse
import copy
import json
import multiprocessing
import os
import time
from create_dummy_models import COMPOSITE_MODELS, create_tiny_models
from huggingface_hub import ModelFilter, hf_api
import transformers
from transformers import AutoFeatureExtractor, AutoImageProcessor, AutoTokenizer
from transformers.image_processing_utils import BaseImageProcessor
def get_all_model_names():
    """Collect every model class name registered in the PT/TF/Flax auto-mappings,
    returned as a sorted list."""
    model_names = set()
    # Each auto modeling file contains multiple mappings. Let's get them in a dynamic way.
    for module_name in ("modeling_auto", "modeling_tf_auto", "modeling_flax_auto"):
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # All `*_MAPPING_NAMES` constants declared in this auto modeling file.
        mapping_names = [
            attr
            for attr in dir(module)
            if attr.endswith("_MAPPING_NAMES")
            and (attr.startswith("MODEL_") or attr.startswith("TF_MODEL_") or attr.startswith("FLAX_MODEL_"))
        ]
        for attr in mapping_names:
            mapping = getattr(module, attr)
            if mapping is not None:
                # Mapping values are either a single class name or a list/tuple of them.
                for value in mapping.values():
                    if isinstance(value, (list, tuple)):
                        model_names.update(value)
                    elif isinstance(value, str):
                        model_names.add(value)
    return sorted(model_names)
def get_tiny_model_names_from_repo():
    """Return the sorted model class names that already have a tiny checkpoint on the
    Hub, dropping any name whose sibling PT/TF implementation is still missing one."""
    # All model names defined in auto mappings
    all_names = set(get_all_model_names())
    with open("tests/utils/tiny_model_summary.json") as fp:
        tiny_model_info = json.load(fp)
    tiny_names = set()
    for base_name in tiny_model_info:
        tiny_names.update(tiny_model_info[base_name]["model_classes"])
    # Remove a tiny model name if one of its framework implementations hasn't yet a
    # tiny version on the Hub.
    not_on_hub = all_names.difference(tiny_names)
    for name in copy.copy(tiny_names):
        if not name.startswith("TF") and f"TF{name}" in not_on_hub:
            tiny_names.remove(name)
        elif name.startswith("TF") and name[2:] in not_on_hub:
            tiny_names.remove(name)
    return sorted(tiny_names)
def get_tiny_model_summary_from_hub(output_path):
"""Probe every `hf-internal-testing/tiny-random-*` repo on the Hub and write a
summary (tokenizer/processor/model classes + repo sha) to
`<output_path>/hub_tiny_model_summary.json`. Each probe is best-effort: failures
are silently skipped so one broken repo doesn't abort the whole sweep."""
special_models = COMPOSITE_MODELS.values()
# All tiny model base names on Hub
model_names = get_all_model_names()
models = hf_api.list_models(
filter=ModelFilter(
author="hf-internal-testing",
)
)
_models = set()
for x in models:
model = x.modelId
org, model = model.split("/")
if not model.startswith("tiny-random-"):
continue
model = model.replace("tiny-random-", "")
# Model class names start with an uppercase letter; skip anything else.
if not model[0].isupper():
continue
if model not in model_names and model not in special_models:
continue
_models.add(model)
models = sorted(_models)
# All tiny model names on Hub
summary = {}
for model in models:
repo_id = f"hf-internal-testing/tiny-random-{model}"
model = model.split("-")[0]
try:
repo_info = hf_api.repo_info(repo_id)
content = {
"tokenizer_classes": set(),
"processor_classes": set(),
"model_classes": set(),
"sha": repo_info.sha,
}
except Exception:
continue
# The time.sleep(1) calls below throttle Hub requests to avoid rate limiting.
try:
time.sleep(1)
tokenizer_fast = AutoTokenizer.from_pretrained(repo_id)
content["tokenizer_classes"].add(tokenizer_fast.__class__.__name__)
except Exception:
pass
try:
time.sleep(1)
tokenizer_slow = AutoTokenizer.from_pretrained(repo_id, use_fast=False)
content["tokenizer_classes"].add(tokenizer_slow.__class__.__name__)
except Exception:
pass
try:
time.sleep(1)
img_p = AutoImageProcessor.from_pretrained(repo_id)
content["processor_classes"].add(img_p.__class__.__name__)
except Exception:
pass
try:
time.sleep(1)
feat_p = AutoFeatureExtractor.from_pretrained(repo_id)
# Image processors were already collected above; only record true feature extractors.
if not isinstance(feat_p, BaseImageProcessor):
content["processor_classes"].add(feat_p.__class__.__name__)
except Exception:
pass
try:
time.sleep(1)
model_class = getattr(transformers, model)
m = model_class.from_pretrained(repo_id)
content["model_classes"].add(m.__class__.__name__)
except Exception:
pass
try:
time.sleep(1)
model_class = getattr(transformers, f"TF{model}")
m = model_class.from_pretrained(repo_id)
content["model_classes"].add(m.__class__.__name__)
except Exception:
pass
# Sets are not JSON-serializable: convert to sorted lists for a stable output.
content["tokenizer_classes"] = sorted(content["tokenizer_classes"])
content["processor_classes"] = sorted(content["processor_classes"])
content["model_classes"] = sorted(content["model_classes"])
summary[model] = content
with open(os.path.join(output_path, "hub_tiny_model_summary.json"), "w") as fp:
json.dump(summary, fp, ensure_ascii=False, indent=4)
# Entry point: create and upload tiny models for every class not yet on the Hub.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.")
args = parser.parse_args()
# This has to be `spawn` to avoid hanging forever!
multiprocessing.set_start_method("spawn")
output_path = "tiny_models"
# NOTE: `all` shadows the builtin here; it is only a flag passed to create_tiny_models.
all = True
model_types = None
models_to_skip = get_tiny_model_names_from_repo()
no_check = True
upload = True
organization = "hf-internal-testing"
create_tiny_models(
output_path,
all,
model_types,
models_to_skip,
no_check,
upload,
organization,
token=os.environ.get("TOKEN", None),
num_workers=args.num_workers,
)
| 7,272 | 35.18408 | 119 | py |
transformers | transformers-main/utils/check_doc_toc.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    occurrences = defaultdict(int)
    for entry in model_doc:
        occurrences[entry["local"]] += 1
    duplicate_keys = [key for key, count in occurrences.items() if count > 1]

    cleaned = []
    for key in duplicate_keys:
        # Duplicated entries must all share the same title, otherwise we can't pick one.
        titles = list({entry["title"] for entry in model_doc if entry["local"] == key})
        if len(titles) > 1:
            raise ValueError(
                f"{key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        cleaned.append({"local": key, "title": titles[0]})

    # Add none duplicate-keys
    cleaned.extend(entry for entry in model_doc if occurrences[entry["local"]] == 1)
    # Sort case-insensitively by title.
    return sorted(cleaned, key=lambda entry: entry["title"].lower())
def check_model_doc(overwrite=False):
    """
    Check that the model part of `docs/source/en/_toctree.yml` is deduplicated and alphabetically sorted.
    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether to rewrite the file in place when it is not clean (`True`) or raise an error (`False`).
    Raises:
        ValueError: if `overwrite=False` and the model table of content needs cleaning.
    """
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    # Modality subsections are the entries of `model_doc` that carry their own `sections` list.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                # `idx` is the position of this modality inside `model_doc`, so the cleaned list goes back in place.
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            # Propagate the cleaned sections back into the full document and rewrite the TOC file.
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # With `--fix_and_overwrite`, the TOC file is rewritten in place; otherwise `check_model_doc`
    # raises when the model table of content is not clean.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 3,385 | 33.20202 | 116 | py |
transformers | transformers-main/utils/tests_fetcher.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Welcome to tests_fetcher V2.
This util is designed to fetch tests to run on a PR so that only the tests impacted by the modifications are run, and
when too many models are being impacted, only run the tests of a subset of core models. It works like this.
Stage 1: Identify the modified files. This takes all the files from the branching point to the current commit (so
all modifications in a PR, not just the last commit) but excludes modifications that are on docstrings or comments
only.
Stage 2: Extract the tests to run. This is done by looking at the imports in each module and test file: if module A
imports module B, then changing module B impacts module A, so the tests using module A should be run. We thus get the
dependencies of each module and then recursively build the 'reverse' map of dependencies to get all modules and tests
impacted by a given file. We then only keep the tests (and only the core models tests if there are too many modules).
Caveats:
- This module only filters tests by files (not individual tests) so it's better to have tests for different things
in different files.
- This module assumes inits are just importing things, not really building objects, so it's better to structure
them this way and move objects building in separate submodules.
"""
import argparse
import collections
import json
import os
import re
from contextlib import contextmanager
from pathlib import Path
from git import Repo
# All paths are resolved from this file's location (`utils/`), so the script works from any cwd.
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
PATH_TO_EXAMPLES = PATH_TO_REPO / "examples"
# NOTE(review): "TRANFORMERS" (sic) -- the typo is kept because other code in this module uses this name.
PATH_TO_TRANFORMERS = PATH_TO_REPO / "src/transformers"
PATH_TO_TESTS = PATH_TO_REPO / "tests"
# List here the models to always test.
IMPORTANT_MODELS = [
    "auto",
    # Most downloaded models
    "bert",
    "clip",
    "t5",
    "xlm-roberta",
    "gpt2",
    "bart",
    "mpnet",
    "gpt-j",
    "wav2vec2",
    "deberta-v2",
    "layoutlm",
    "opt",
    "longformer",
    "vit",
    # Pipeline-specific model (to be sure each pipeline has one model in this list)
    "tapas",
    "vilt",
    "clap",
    "detr",
    "owlvit",
    "dpt",
    "videomae",
]
@contextmanager
def checkout_commit(repo, commit_id):
    """
    Context manager that checks out `commit_id` in `repo` and restores the previous HEAD on exit.
    """
    # The restore point is the commit itself when HEAD is detached, the branch reference otherwise.
    if repo.head.is_detached:
        restore_point = repo.head.commit
    else:
        restore_point = repo.head.ref
    try:
        repo.git.checkout(commit_id)
        yield
    finally:
        # Always go back to where we were, even when the body (or the checkout itself) raised.
        repo.git.checkout(restore_point)
def clean_code(content):
    """
    Strip `content` down to executable code: remove triple-quoted docstrings, comments and blank lines.
    """
    # Splitting on a triple-quote delimiter and keeping only even-indexed chunks drops the quoted spans.
    for delimiter in ('"""', "'''"):
        content = "".join(content.split(delimiter)[::2])
    kept_lines = []
    for raw_line in content.split("\n"):
        # Drop anything after a # sign, then skip lines that are empty or whitespace-only.
        code_only = re.sub("#.*$", "", raw_line)
        if code_only and not code_only.isspace():
            kept_lines.append(code_only)
    return "\n".join(kept_lines)
def keep_doc_examples_only(content):
    """
    Reduce `content` to its fenced doc examples: keep only the text between ``` markers, then strip comments and
    blank lines.
    """
    chunks = content.split("```")
    # Odd-indexed chunks are the ones inside code fences; re-wrap them with leading/trailing ``` so the result
    # stays easy to compare against the original input.
    content = "```" + "```".join(chunks[1::2]) + "```"
    kept_lines = []
    for raw_line in content.split("\n"):
        # Drop anything after a # sign, then skip lines that are empty or whitespace-only.
        code_only = re.sub("#.*$", "", raw_line)
        if code_only and not code_only.isspace():
            kept_lines.append(code_only)
    return "\n".join(kept_lines)
def get_all_tests():
    """
    Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`.
    - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded.
    - folders under `tests/models`: `bert`, `gpt2`, etc.
    - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
    """
    # Folders and `test_*.py` files living directly under `tests/`.
    top_level = [f"tests/{entry}" for entry in os.listdir(PATH_TO_TESTS) if "__pycache__" not in entry]
    top_level = sorted(t for t in top_level if (PATH_TO_REPO / t).is_dir() or t.startswith("tests/test_"))
    # One folder per model under `tests/models/`.
    per_model = [
        f"tests/models/{entry}" for entry in os.listdir(PATH_TO_TESTS / "models") if "__pycache__" not in entry
    ]
    per_model = sorted(m for m in per_model if (PATH_TO_REPO / m).is_dir())
    # `tests/models` is covered by the per-model folders above.
    top_level.remove("tests/models")
    # Sagemaker tests are not meant to be run on the CI.
    if "tests/sagemaker" in top_level:
        top_level.remove("tests/sagemaker")
    return per_model + top_level
def diff_is_docstring_only(repo, branching_point, filename):
    """
    Check if the diff is only in docstrings in a filename.
    Returns `True` when the file contents at `branching_point` and at the current HEAD are identical once
    docstrings, comments and blank lines are stripped (see `clean_code`).
    """
    folder = Path(repo.working_dir)
    # Read the old version of the file while `branching_point` is checked out...
    with checkout_commit(repo, branching_point):
        with open(folder / filename, "r", encoding="utf-8") as f:
            old_content = f.read()
    # ... and the new version only AFTER the original checkout is restored (the order here is load-bearing).
    with open(folder / filename, "r", encoding="utf-8") as f:
        new_content = f.read()
    old_content_clean = clean_code(old_content)
    new_content_clean = clean_code(new_content)
    return old_content_clean == new_content_clean
def diff_contains_doc_examples(repo, branching_point, filename):
    """
    Check whether the diff for `filename` touches one of its doc examples.
    Returns `True` when the fenced doc examples (see `keep_doc_examples_only`) differ between `branching_point`
    and the current HEAD.
    """
    folder = Path(repo.working_dir)
    # Read the old version of the file while `branching_point` is checked out...
    with checkout_commit(repo, branching_point):
        with open(folder / filename, "r", encoding="utf-8") as f:
            old_content = f.read()
    # ... and the new version only AFTER the original checkout is restored (the order here is load-bearing).
    with open(folder / filename, "r", encoding="utf-8") as f:
        new_content = f.read()
    old_content_clean = keep_doc_examples_only(old_content)
    new_content_clean = keep_doc_examples_only(new_content)
    return old_content_clean != new_content_clean
def get_diff(repo, base_commit, commits):
    """
    Gets the diff between one or several commits and the head of the repository.
    Returns the list of python files that were added, deleted, or whose code (not only docstrings/comments)
    changed.
    """
    print("\n### DIFF ###\n")
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            # We always add new python files
            if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
                code_diff.append(diff_obj.b_path)
            # We check that deleted python files won't break corresponding tests.
            elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
                code_diff.append(diff_obj.a_path)
            # Now for modified files
            elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
                # In case of renames, we'll look at the tests using both the old and new name.
                if diff_obj.a_path != diff_obj.b_path:
                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
                else:
                    # Otherwise, we check modifications are in code and not docstrings.
                    if diff_is_docstring_only(repo, commit, diff_obj.b_path):
                        print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
                    else:
                        code_diff.append(diff_obj.a_path)
    return code_diff
def get_modified_python_files(diff_with_last_commit=False):
    """
    Return a list of python files that have been modified between:
    - the current head and the main branch if `diff_with_last_commit=False` (default)
    - the current head and its parent commit otherwise.
    """
    repo = Repo(PATH_TO_REPO)
    if not diff_with_last_commit:
        print(f"main is at {repo.refs.main.commit}")
        print(f"Current head is at {repo.head.commit}")
        # Diff against the merge base(s) with main so the whole PR is covered, not just its last commit.
        branching_commits = repo.merge_base(repo.refs.main, repo.head)
        for commit in branching_commits:
            print(f"Branching commit: {commit}")
        return get_diff(repo, repo.head.commit, branching_commits)
    else:
        # This branch is used when the fetcher runs on `main` itself (see `__main__`), hence the message below.
        print(f"main is at {repo.head.commit}")
        parent_commits = repo.head.commit.parents
        for commit in parent_commits:
            print(f"Parent commit: {commit}")
        return get_diff(repo, repo.head.commit, parent_commits)
def get_diff_for_doctesting(repo, base_commit, commits):
    """
    Gets the diff between one or several commits and the head of the repository where some doc example(s) are changed.
    Returns the list of python/markdown file paths whose doc examples are (potentially) impacted.
    """
    print("\n### DIFF ###\n")
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            # We always add new python/md files
            if diff_obj.change_type == "A" and diff_obj.b_path.endswith((".py", ".md")):
                code_diff.append(diff_obj.b_path)
            # Now for modified files. NOTE: the `.md` check used to sit outside the parentheses
            # (`... and endswith(".py") or endswith(".md")`), which applied it to EVERY change type and
            # crashed with AttributeError on deletions, where `b_path` is None.
            elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith((".py", ".md")):
                # In case of renames, we'll look at the tests using both the old and new name.
                if diff_obj.a_path != diff_obj.b_path:
                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
                else:
                    # Otherwise, we check modifications contain some doc example(s).
                    if diff_contains_doc_examples(repo, commit, diff_obj.b_path):
                        code_diff.append(diff_obj.a_path)
                    else:
                        print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.")
    return code_diff
def get_doctest_files(diff_with_last_commit=False):
    """
    Return a list of python and mdx files where some doc example(s) in them have been modified between:
    - the current head and the main branch if `diff_with_last_commit=False` (default)
    - the current head and its parent commit otherwise.
    Only files listed in `utils/documentation_tests.txt` (and not in `utils/slow_documentation_tests.txt`)
    are kept.
    """
    repo = Repo(PATH_TO_REPO)
    test_files_to_run = []  # noqa
    if not diff_with_last_commit:
        print(f"main is at {repo.refs.main.commit}")
        print(f"Current head is at {repo.head.commit}")
        # Diff against the merge base(s) with main so the whole PR is covered, not just its last commit.
        branching_commits = repo.merge_base(repo.refs.main, repo.head)
        for commit in branching_commits:
            print(f"Branching commit: {commit}")
        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)
    else:
        # This branch is used when the fetcher runs on `main` itself, hence the message below.
        print(f"main is at {repo.head.commit}")
        parent_commits = repo.head.commit.parents
        for commit in parent_commits:
            print(f"Parent commit: {commit}")
        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)
    # This is the full list of doctest tests
    with open("utils/documentation_tests.txt") as fp:
        documentation_tests = set(fp.read().strip().split("\n"))
    # Not to run slow doctest tests
    with open("utils/slow_documentation_tests.txt") as fp:
        slow_documentation_tests = set(fp.read().strip().split("\n"))
    # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%.
    test_files_to_run = [
        x for x in test_files_to_run if x in documentation_tests and x not in slow_documentation_tests
    ]
    # Make sure we did not end up with a test file that was removed
    test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]
    return test_files_to_run
# Regexes matching the four shapes of import statements handled by `extract_imports` below.
# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+([^\n]+) -> Line only contains from .xxx import yyy and we catch .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every
# other import.
_re_single_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+([^\n]+)(?=\n)")
# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\) -> Line continues with from .xxx import (yyy) and we catch .xxx and yyy
# yyy will take multiple lines otherwise there wouldn't be parenthesis.
_re_multi_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\)")
# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line.
# \s*from\s+transformers(\S*)\s+import\s+([^\n]+) -> Line only contains from transformers.xxx import yyy and we catch
# .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every
# other import.
_re_single_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+transformers(\S*)\s+import\s+([^\n]+)(?=\n)")
# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line.
# \s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\) -> Line continues with from transformers.xxx import (yyy) and we
# catch .xxx and yyy. yyy will take multiple lines otherwise there wouldn't be parenthesis.
_re_multi_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\)")
def extract_imports(module_fname, cache=None):
    """
    Get the imports a given module makes. This takes a module filename and returns the list of module filenames
    imported in the module with the objects imported in that module filename, as a list of
    `(imported_module_file, [imported_object_names])` pairs. Results are memoized in `cache` when provided.
    """
    if cache is not None and module_fname in cache:
        return cache[module_fname]
    with open(PATH_TO_REPO / module_fname, "r", encoding="utf-8") as f:
        content = f.read()
    # Filter out all docstrings to not get imports in code examples.
    # fmt: off
    splits = content.split('\"\"\"')
    # fmt: on
    content = "".join(splits[::2])
    module_parts = str(module_fname).split(os.path.sep)
    imported_modules = []
    # Let's start with relative imports
    relative_imports = _re_single_line_relative_imports.findall(content)
    relative_imports = [
        (mod, imp) for mod, imp in relative_imports if "# tests_ignore" not in imp and imp.strip() != "("
    ]
    multiline_relative_imports = _re_multi_line_relative_imports.findall(content)
    relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if "# tests_ignore" not in imp]
    for module, imports in relative_imports:
        # Count the leading dots to know how many levels up the relative import goes.
        level = 0
        while module.startswith("."):
            module = module[1:]
            level += 1
        if len(module) > 0:
            dep_parts = module_parts[: len(module_parts) - level] + module.split(".")
        else:
            dep_parts = module_parts[: len(module_parts) - level]
        imported_module = os.path.sep.join(dep_parts)
        imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))
    # Let's continue with direct imports
    direct_imports = _re_single_line_direct_imports.findall(content)
    direct_imports = [(mod, imp) for mod, imp in direct_imports if "# tests_ignore" not in imp and imp.strip() != "("]
    multiline_direct_imports = _re_multi_line_direct_imports.findall(content)
    direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if "# tests_ignore" not in imp]
    for module, imports in direct_imports:
        import_parts = module.split(".")[1:]  # ignore the first .
        dep_parts = ["src", "transformers"] + import_parts
        imported_module = os.path.sep.join(dep_parts)
        imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))
    result = []
    for module_file, imports in imported_modules:
        # Resolve the import target to an actual file: either `<module>.py` or `<module>/__init__.py`.
        if (PATH_TO_REPO / f"{module_file}.py").is_file():
            module_file = f"{module_file}.py"
        elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / "__init__.py").is_file():
            module_file = os.path.sep.join([module_file, "__init__.py"])
        # Only keep plain identifiers (drops empty entries and things like `foo as bar` or stray parentheses).
        imports = [imp for imp in imports if len(imp) > 0 and re.match("^[A-Za-z0-9_]*$", imp)]
        if len(imports) > 0:
            result.append((module_file, imports))
    if cache is not None:
        cache[module_fname] = result
    return result
def get_module_dependencies(module_fname, cache=None):
    """
    Get the dependencies of a module from the module filename as a list of module filenames. This will resolve any
    __init__ we pass: if we import from a submodule utils, the dependencies will be utils/foo.py and utils/bar.py (if
    the objects imported actually come from utils.foo and utils.bar) not utils/__init__.py.
    """
    dependencies = []
    imported_modules = extract_imports(module_fname, cache=cache)
    # The while loop is to recursively traverse all inits we may encounter.
    while len(imported_modules) > 0:
        # Inits discovered at this level; they are traversed in the next iteration of the while loop.
        new_modules = []
        for module, imports in imported_modules:
            # If we end up in an __init__ we are often not actually importing from this init (except in the case where
            # the object is fully defined in the __init__)
            if module.endswith("__init__.py"):
                # So we get the imports from that init then try to find where our objects come from.
                new_imported_modules = extract_imports(module, cache=cache)
                for new_module, new_imports in new_imported_modules:
                    if any(i in new_imports for i in imports):
                        if new_module not in dependencies:
                            new_modules.append((new_module, [i for i in new_imports if i in imports]))
                        # Objects resolved through this re-export are no longer searched for.
                        imports = [i for i in imports if i not in new_imports]
                if len(imports) > 0:
                    # If there are any objects lefts, they may be a submodule
                    path_to_module = PATH_TO_REPO / module.replace("__init__.py", "")
                    dependencies.extend(
                        [
                            os.path.join(module.replace("__init__.py", ""), f"{i}.py")
                            for i in imports
                            if (path_to_module / f"{i}.py").is_file()
                        ]
                    )
                    imports = [i for i in imports if not (path_to_module / f"{i}.py").is_file()]
                    if len(imports) > 0:
                        # Then if there are still objects left, they are fully defined in the init, so we keep it as a
                        # dependency.
                        dependencies.append(module)
            else:
                dependencies.append(module)
        imported_modules = new_modules
    return dependencies
def create_reverse_dependency_tree():
    """
    Create a list of all edges (a, b) which mean that modifying a impacts b, with a going over all module and
    test files.
    """
    cache = {}
    source_files = list(PATH_TO_TRANFORMERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py"))
    source_files = [str(path.relative_to(PATH_TO_REPO)) for path in source_files]
    # A set deduplicates edges coming from modules that share dependencies.
    edges = set()
    for module in source_files:
        for dependency in get_module_dependencies(module, cache=cache):
            edges.add((dependency, module))
    return list(edges)
def get_tree_starting_at(module, edges):
    """
    Returns the tree starting at a given module following all edges in the following format: [module, [list of edges
    starting at module], [list of edges starting at the preceding level], ...]
    """
    seen = [module]
    # First level: edges leaving `module` (self-loops and __init__ targets are ignored).
    frontier = [edge for edge in edges if edge[0] == module and edge[1] != module and "__init__.py" not in edge[1]]
    tree = [module]
    while frontier:
        tree.append(frontier)
        next_level = list({edge[1] for edge in frontier})
        seen.extend(next_level)
        # Next level: edges leaving any vertex just reached, avoiding vertices already visited.
        frontier = [
            edge
            for edge in edges
            if edge[0] in next_level and edge[1] not in seen and "__init__.py" not in edge[1]
        ]
    return tree
def print_tree_deps_of(module, all_edges=None):
    """
    Prints the tree of modules depending on a given module.
    Args:
        module (`str`): Module filename (relative to the repo root) whose dependents are printed.
        all_edges (`list`, *optional*):
            Precomputed edges from `create_reverse_dependency_tree`; recomputed when not provided.
    """
    if all_edges is None:
        all_edges = create_reverse_dependency_tree()
    tree = get_tree_starting_at(module, all_edges)
    # The list of lines is a list of tuples (line_to_be_printed, module)
    # Keeping the modules lets us know where to insert each new lines in the list.
    lines = [(tree[0], tree[0])]
    for index in range(1, len(tree)):
        edges = tree[index]
        start_edges = {edge[0] for edge in edges}
        for start in start_edges:
            end_edges = {edge[1] for edge in edges if edge[0] == start}
            # We will insert all those edges just after the line showing start.
            pos = 0
            while lines[pos][1] != start:
                pos += 1
            # Indent by two spaces per tree depth so children line up under their parent.
            lines = lines[: pos + 1] + [(" " * (2 * index) + end, end) for end in end_edges] + lines[pos + 1 :]
    for line in lines:
        # We don't print the refs that were just here to help build lines.
        print(line[0])
def init_test_examples_dependencies():
    """
    The test examples do not import from the examples (which are just scripts, not modules) so we need some extra
    care initializing the dependency map there.
    Returns:
        A tuple `(test_example_deps, all_examples)` where `test_example_deps` maps each example test file to the
        example scripts (of the same framework) whose filename appears in its content, and `all_examples` lists all
        example files found.
    """
    test_example_deps = {}
    all_examples = []
    for framework in ["flax", "pytorch", "tensorflow"]:
        test_files = list((PATH_TO_EXAMPLES / framework).glob("test_*.py"))
        all_examples.extend(test_files)
        # Only keep the example scripts living in a subfolder of `examples/<framework>`.
        examples = [
            f for f in (PATH_TO_EXAMPLES / framework).glob("**/*.py") if f.parent != PATH_TO_EXAMPLES / framework
        ]
        all_examples.extend(examples)
        for test_file in test_files:
            with open(test_file, "r", encoding="utf-8") as f:
                content = f.read()
            # An example is a dependency of a test file when its filename is mentioned in the test's source.
            test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [
                str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content
            ]
            # Each test file also depends on itself.
            test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append(
                str(test_file.relative_to(PATH_TO_REPO))
            )
    return test_example_deps, all_examples
def create_reverse_dependency_map():
    """
    Create the dependency map from module/test filename to the list of modules/tests that depend on it (even
    recursively).
    """
    cache = {}
    # Example scripts cannot be imported, so their dependencies are seeded separately.
    example_deps, examples = init_test_examples_dependencies()
    all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) + examples
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}
    direct_deps.update(example_deps)
    # This recurses the dependencies
    # Fixed-point iteration: keep propagating transitive dependencies until nothing new is added.
    something_changed = True
    while something_changed:
        something_changed = False
        for m in all_modules:
            for d in direct_deps[m]:
                if d.endswith("__init__.py"):
                    continue
                if d not in direct_deps:
                    raise ValueError(f"KeyError:{d}. From {m}")
                new_deps = set(direct_deps[d]) - set(direct_deps[m])
                if len(new_deps) > 0:
                    direct_deps[m].extend(list(new_deps))
                    something_changed = True
    # Finally we can build the reverse map.
    reverse_map = collections.defaultdict(list)
    for m in all_modules:
        for d in direct_deps[m]:
            reverse_map[d].append(m)
    # For each init, the impacted modules are those impacted by any of its non-init dependencies (plus the
    # dependencies themselves), excluding the init itself.
    for m in [f for f in all_modules if f.endswith("__init__.py")]:
        direct_deps = get_module_dependencies(m, cache=cache)
        deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps)
        reverse_map[m] = list(set(deps) - {m})
    return reverse_map
def create_module_to_test_map(reverse_map=None, filter_models=False):
    """
    Extract the tests from the reverse_dependency_map and potentially filters the model tests.
    """
    if reverse_map is None:
        reverse_map = create_reverse_dependency_map()

    def is_test(fname):
        # Anything under `tests/` counts, plus example test scripts (`examples/**/test_*.py`).
        if fname.startswith("tests"):
            return True
        return fname.startswith("examples") and fname.split(os.path.sep)[-1].startswith("test")

    test_map = {module: [dep for dep in deps if is_test(dep)] for module, deps in reverse_map.items()}
    if not filter_models:
        return test_map
    num_model_tests = len(list(PATH_TO_TESTS.glob("models/*")))

    def has_many_models(tests):
        # `parts[2]` is the model folder name in paths like `tests/models/bert/...`.
        model_tests = {Path(t).parts[2] for t in tests if t.startswith("tests/models/")}
        return len(model_tests) > num_model_tests // 2

    def filter_tests(tests):
        return [t for t in tests if not t.startswith("tests/models/") or Path(t).parts[2] in IMPORTANT_MODELS]

    # When more than half of the model test suites are impacted, restrict to the important models only.
    filtered = {}
    for module, tests in test_map.items():
        filtered[module] = filter_tests(tests) if has_many_models(tests) else tests
    return filtered
def check_imports_all_exist():
    """
    Isn't used per se by the test fetcher but might be used later as a quality check. Putting this here for now so the
    code is not lost.
    Prints every dependency resolved by `get_module_dependencies` that does not exist on disk.
    """
    cache = {}
    all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py"))
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}
    for module, deps in direct_deps.items():
        for dep in deps:
            if not (PATH_TO_REPO / dep).is_file():
                print(f"{module} has dependency on {dep} which does not exist.")
def _print_list(l):
return "\n".join([f"- {f}" for f in l])
def create_json_map(test_files_to_run, json_output_file):
    """
    Dump a JSON mapping of test categories (e.g. `models/bert`, `trainer`, `common`) to a space-separated,
    sorted string of the test files/folders belonging to each category. No-op when `json_output_file` is None.
    """
    if json_output_file is None:
        return
    test_map = {}
    for test_file in test_files_to_run:
        # `test_file` is a path rooted at `tests/`, e.g.
        # - `tests/models/bert/test_modeling_bert.py` or `tests/models/bert`
        # - `tests/trainer/test_trainer.py` or `tests/trainer`
        # - `tests/test_modeling_common.py`
        parts = test_file.split(os.path.sep)
        if parts[1] == "models":
            # Modeling tests are grouped by `models/<model_name>`.
            category = os.path.sep.join(parts[1:3])
        elif len(parts) > 2 or not test_file.endswith(".py"):
            # Test folders under `tests` (or python files inside them) are grouped by the folder name.
            category = os.path.sep.join(parts[1:2])
        else:
            # Common test files sitting directly under `tests/`.
            category = "common"
        test_map.setdefault(category, []).append(test_file)
    # Sort the keys & values for reproducible output.
    test_map = {category: " ".join(sorted(files)) for category, files in sorted(test_map.items())}
    with open(json_output_file, "w", encoding="UTF-8") as fp:
        json.dump(test_map, fp, ensure_ascii=False)
def infer_tests_to_run(output_file, diff_with_last_commit=False, filter_models=True, json_output_file=None):
    """
    Determine the tests impacted by the current diff and write them to `output_file`, plus side files next to it:
    `test_repo_utils.txt` (when utils or `setup.py` changed), `examples_test_list.txt` and `doctest_list.txt`.

    Args:
        output_file (`str`): Where to write the space-separated list of test files/folders to run.
        diff_with_last_commit (`bool`, *optional*, defaults to `False`):
            Whether to diff against the last commit (used on main) instead of the branching point from main.
        filter_models (`bool`, *optional*, defaults to `True`):
            Whether to restrict model tests to `IMPORTANT_MODELS` when too many models are impacted.
        json_output_file (`str`, *optional*):
            If given, also dump a JSON map of test categories to test files (see `create_json_map`).
    """
    modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
    print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")
    # Create the map that will give us all impacted modules.
    reverse_map = create_reverse_dependency_map()
    impacted_files = modified_files.copy()
    for f in modified_files:
        if f in reverse_map:
            impacted_files.extend(reverse_map[f])
    # Remove duplicates
    impacted_files = sorted(set(impacted_files))
    print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")
    # Grab the corresponding test files:
    if "setup.py" in modified_files:
        # A setup change can impact anything: run the whole suite, including the repo utils tests.
        test_files_to_run = ["tests"]
        repo_utils_launch = True
    else:
        # All modified tests need to be run.
        test_files_to_run = [
            f for f in modified_files if f.startswith("tests") and f.split(os.path.sep)[-1].startswith("test")
        ]
        # Then we grab the corresponding test files.
        test_map = create_module_to_test_map(reverse_map=reverse_map, filter_models=filter_models)
        for f in modified_files:
            if f in test_map:
                test_files_to_run.extend(test_map[f])
        test_files_to_run = sorted(set(test_files_to_run))
        # Remove repo utils tests
        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == "repo_utils"]
        # Remove SageMaker tests
        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == "sagemaker"]
        # Make sure we did not end up with a test file that was removed
        test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]
        # BUGFIX: this assignment used to sit at function level, unconditionally overwriting the
        # `repo_utils_launch = True` set in the `setup.py` branch above. It only applies here.
        repo_utils_launch = any(f.split(os.path.sep)[0] == "utils" for f in modified_files)
    if repo_utils_launch:
        repo_util_file = Path(output_file).parent / "test_repo_utils.txt"
        with open(repo_util_file, "w", encoding="utf-8") as f:
            f.write("tests/repo_utils")
    # Example tests are written to their own side file and removed from the main list.
    examples_tests_to_run = [f for f in test_files_to_run if f.startswith("examples")]
    test_files_to_run = [f for f in test_files_to_run if not f.startswith("examples")]
    print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
    if len(test_files_to_run) > 0:
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(" ".join(test_files_to_run))
    # Create a map that maps test categories to test files, i.e. `models/bert` -> [...test_modeling_bert.py, ...]
    # Get all test directories (and some common test files) under `tests` and `tests/models` if `test_files_to_run`
    # contains `tests` (i.e. when `setup.py` is changed).
    if "tests" in test_files_to_run:
        test_files_to_run = get_all_tests()
    create_json_map(test_files_to_run, json_output_file)
    print(f"\n### EXAMPLES TEST TO RUN ###\n{_print_list(examples_tests_to_run)}")
    if len(examples_tests_to_run) > 0:
        example_file = Path(output_file).parent / "examples_test_list.txt"
        with open(example_file, "w", encoding="utf-8") as f:
            f.write(" ".join(examples_tests_to_run))
    doctest_list = get_doctest_files()
    print(f"\n### DOCTEST TO RUN ###\n{_print_list(doctest_list)}")
    if len(doctest_list) > 0:
        doctest_file = Path(output_file).parent / "doctest_list.txt"
        with open(doctest_file, "w", encoding="utf-8") as f:
            f.write(" ".join(doctest_list))
def filter_tests(output_file, filters):
    """
    Reads the content of the output file and filters out all the tests in a list of given folders.

    Args:
        output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher.
        filters (`List[str]`): A list of folders to filter.
    """
    if not os.path.isfile(output_file):
        print("No test file found.")
        return
    with open(output_file, "r", encoding="utf-8") as f:
        test_files = f.read().split(" ")
    if len(test_files) == 0 or test_files == [""]:
        print("No tests to filter.")
        return
    if test_files == ["tests"]:
        # The whole suite was requested: expand to the folders under `tests/`, minus the filtered ones.
        kept = [os.path.join("tests", name) for name in os.listdir("tests") if name not in ["__init__.py"] + filters]
    else:
        # The folder name is the path component right after `tests/`.
        kept = [t for t in test_files if t.split(os.path.sep)[1] not in filters]
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(" ".join(kept))
def parse_commit_message(commit_message):
    """
    Parses the commit message to detect if a command is there to skip, force all or part of the CI.

    Returns a dictionary of strings to bools with keys skip, test_all_models and test_all.
    """
    no_command = {"skip": False, "no_filter": False, "test_all": False}
    if commit_message is None:
        return no_command
    bracketed = re.search(r"\[([^\]]*)\]", commit_message)
    if bracketed is None:
        return no_command
    # Normalize `-`/`_` separators so e.g. `[test-all]` and `[test_all]` behave like `[test all]`.
    command = bracketed.groups()[0].lower().replace("-", " ").replace("_", " ")
    words = set(command.split(" "))
    return {
        "skip": command in ["ci skip", "skip ci", "circleci skip", "skip circleci"],
        "no_filter": words == {"no", "filter"},
        "test_all": words == {"test", "all"},
    }
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
    )
    parser.add_argument(
        "--json_output_file",
        type=str,
        default="test_map.json",
        help="Where to store the tests to run in a dictionary format mapping test categories to test files",
    )
    parser.add_argument(
        "--diff_with_last_commit",
        action="store_true",
        help="To fetch the tests between the current commit and the last commit",
    )
    parser.add_argument(
        "--filter_tests",
        action="store_true",
        help="Will filter the pipeline/repo utils tests outside of the generated list of tests.",
    )
    parser.add_argument(
        "--print_dependencies_of",
        type=str,
        help="Will only print the tree of modules depending on the file passed.",
        default=None,
    )
    parser.add_argument(
        "--commit_message",
        type=str,
        help="The commit message (which could contain a command to force all tests or skip the CI).",
        default=None,
    )
    args = parser.parse_args()
    # Three mutually exclusive modes: print a dependency tree, filter an existing test list, or infer tests.
    if args.print_dependencies_of is not None:
        print_tree_deps_of(args.print_dependencies_of)
    elif args.filter_tests:
        filter_tests(args.output_file, ["pipelines", "repo_utils"])
    else:
        repo = Repo(PATH_TO_REPO)
        # NOTE(review): the HEAD commit message is used here, not `args.commit_message` -- confirm intended.
        commit_message = repo.head.commit.message
        commit_flags = parse_commit_message(commit_message)
        if commit_flags["skip"]:
            print("Force-skipping the CI")
            quit()
        if commit_flags["no_filter"]:
            print("Running all tests fetched without filtering.")
        if commit_flags["test_all"]:
            print("Force-launching all tests")
        diff_with_last_commit = args.diff_with_last_commit
        # On the main branch, diff against the last commit instead of a branching point.
        if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main:
            print("main branch detected, fetching tests against last commit.")
            diff_with_last_commit = True
        if not commit_flags["test_all"]:
            # Try to infer the tests from the diff; any failure falls back to running everything.
            try:
                infer_tests_to_run(
                    args.output_file,
                    diff_with_last_commit=diff_with_last_commit,
                    json_output_file=args.json_output_file,
                    filter_models=not commit_flags["no_filter"],
                )
                filter_tests(args.output_file, ["repo_utils"])
            except Exception as e:
                print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.")
                commit_flags["test_all"] = True
        if commit_flags["test_all"]:
            with open(args.output_file, "w", encoding="utf-8") as f:
                f.write("tests")
            example_file = Path(args.output_file).parent / "examples_test_list.txt"
            with open(example_file, "w", encoding="utf-8") as f:
                f.write("all")
            test_files_to_run = get_all_tests()
            create_json_map(test_files_to_run, args.json_output_file)
| 36,418 | 40.574201 | 120 | py |
transformers | transformers-main/utils/check_doctest_list.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# Paths listed in `utils/documentation_tests.txt` are relative to the repo root.
REPO_PATH = "."
if __name__ == "__main__":
    # Validate the doctest list: every entry must point to an existing file or
    # directory, and entries must be kept in alphabetical order.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []  # entries whose target file/dir is missing
    all_paths = []  # every entry (joined with REPO_PATH), in file order, for the sort check
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 1,574 | 38.375 | 111 | py |
transformers | transformers-main/utils/get_modified_files.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# SHA of the commit where the current branch forked off `main`.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Files added/modified since the fork point (`--diff-filter=d` excludes deleted files).
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
# Keep only `.py` files located under the top-level dirs given as CLI arguments.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed directly by Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 1,506 | 39.72973 | 122 | py |
transformers | transformers-main/utils/print_env.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# Silence TensorFlow's C++-level logging before TF is imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
# Each framework below is optional: report its details if installed, `None` otherwise.
try:
    import torch
    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)
try:
    import deepspeed
    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)
try:
    import tensorflow as tf
    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 1,723 | 28.724138 | 86 | py |
transformers | transformers-main/utils/create_dummy_models.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections.abc
import copy
import inspect
import json
import multiprocessing
import os
import shutil
import tempfile
import traceback
from pathlib import Path
from check_config_docstrings import get_checkpoint_from_config_class
from datasets import load_dataset
from get_test_info import get_model_to_tester_mapping, get_tester_classes_for_model
from huggingface_hub import Repository, create_repo, hf_api, upload_folder
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
IMAGE_PROCESSOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoTokenizer,
LayoutLMv3TokenizerFast,
PreTrainedTokenizer,
PreTrainedTokenizerFast,
logging,
)
from transformers.feature_extraction_utils import FeatureExtractionMixin
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.image_processing_utils import BaseImageProcessor
from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name
from transformers.models.fsmt import configuration_fsmt
from transformers.processing_utils import ProcessorMixin, transformers_module
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
# make sure tokenizer plays nice with multiprocessing
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logging.set_verbosity_error()
logging.disable_progress_bar()
logger = logging.get_logger(__name__)
# Silence TensorFlow's C++-level logging.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Tiny checkpoints are created for both frameworks, so both must be installed.
if not is_torch_available():
    raise ValueError("Please install PyTorch.")
if not is_tf_available():
    raise ValueError("Please install TensorFlow.")
FRAMEWORKS = ["pytorch", "tensorflow"]
INVALID_ARCH = []
# Tokenizers with larger vocabularies are retrained down to (at most) this size.
TARGET_VOCAB_SIZE = 1024
# Training/testing datasets used to retrain and smoke-test tokenizers; filled in at runtime.
data = {"training_ds": None, "testing_ds": None}
# Composite architectures mapped to the tiny-model repo name encoding their sub-models.
COMPOSITE_MODELS = {
    "EncoderDecoderModel": "EncoderDecoderModel-bert-bert",
    "SpeechEncoderDecoderModel": "SpeechEncoderDecoderModel-wav2vec2-bert",
    "VisionEncoderDecoderModel": "VisionEncoderDecoderModel-vit-gpt2",
    "VisionTextDualEncoderModel": "VisionTextDualEncoderModel-vit-bert",
}
# This list contains the model architectures for which a tiny version could not be created.
# Avoid to add new architectures here - unless we have verified carefully that it's (almost) impossible to create them.
# One such case is: no model tester class is implemented for a model type (like `MT5`) because its architecture is
# identical to another one (`MT5` is based on `T5`), but trained on different datasets or with different techniques.
UNCONVERTIBLE_MODEL_ARCHITECTURES = {
    "BertGenerationEncoder",
    "BertGenerationDecoder",
    "CamembertForSequenceClassification",
    "CamembertForMultipleChoice",
    "CamembertForMaskedLM",
    "CamembertForCausalLM",
    "CamembertForTokenClassification",
    "CamembertForQuestionAnswering",
    "CamembertModel",
    "TFCamembertForMultipleChoice",
    "TFCamembertForTokenClassification",
    "TFCamembertForQuestionAnswering",
    "TFCamembertForSequenceClassification",
    "TFCamembertForMaskedLM",
    "TFCamembertModel",
    "TFCamembertForCausalLM",
    "DecisionTransformerModel",
    "GraphormerModel",
    "InformerModel",
    "JukeboxModel",
    "MarianForCausalLM",
    "MaskFormerSwinModel",
    "MaskFormerSwinBackbone",
    "MT5Model",
    "MT5ForConditionalGeneration",
    "UMT5ForConditionalGeneration",
    "TFMT5ForConditionalGeneration",
    "TFMT5Model",
    "QDQBertForSequenceClassification",
    "QDQBertForMaskedLM",
    "QDQBertModel",
    "QDQBertForTokenClassification",
    "QDQBertLMHeadModel",
    "QDQBertForMultipleChoice",
    "QDQBertForQuestionAnswering",
    "QDQBertForNextSentencePrediction",
    "ReformerModelWithLMHead",
    "RetriBertModel",
    "Speech2Text2ForCausalLM",
    "TimeSeriesTransformerModel",
    "TrajectoryTransformerModel",
    "TrOCRForCausalLM",
    "XLMProphetNetForConditionalGeneration",
    "XLMProphetNetForCausalLM",
    "XLMProphetNetModel",
    "XLMRobertaModel",
    "XLMRobertaForTokenClassification",
    "XLMRobertaForMultipleChoice",
    "XLMRobertaForMaskedLM",
    "XLMRobertaForCausalLM",
    "XLMRobertaForSequenceClassification",
    "XLMRobertaForQuestionAnswering",
    "TFXLMRobertaForSequenceClassification",
    "TFXLMRobertaForMaskedLM",
    "TFXLMRobertaForCausalLM",
    "TFXLMRobertaForQuestionAnswering",
    "TFXLMRobertaModel",
    "TFXLMRobertaForMultipleChoice",
    "TFXLMRobertaForTokenClassification",
}
def get_processor_types_from_config_class(config_class, allowed_mappings=None):
    """Return a tuple of processor classes registered for `config_class`.

    A tuple is returned because tokenizer mappings may hold both a slow and a fast
    tokenizer class. `allowed_mappings` restricts which auto-mappings are consulted
    (defaults to all of them). Configurations with no processor at all (e.g. generic
    composite models like `EncoderDecoderModel`, or `DecisionTransformer`) yield an
    empty tuple.
    """
    def _as_tuple(value):
        # Normalize a single class or a sequence of classes into a tuple.
        return tuple(value) if isinstance(value, collections.abc.Sequence) else (value,)

    if allowed_mappings is None:
        allowed_mappings = ["processor", "tokenizer", "image_processor", "feature_extractor"]
    found = ()
    # A `ProcessorMixin` entry wins; otherwise collect tokenizers plus an image
    # processor or a feature extractor.
    if "processor" in allowed_mappings and config_class in PROCESSOR_MAPPING:
        found = _as_tuple(PROCESSOR_MAPPING[config_class])
    else:
        if "tokenizer" in allowed_mappings and config_class in TOKENIZER_MAPPING:
            found = TOKENIZER_MAPPING[config_class]
        if "image_processor" in allowed_mappings and config_class in IMAGE_PROCESSOR_MAPPING:
            found += _as_tuple(IMAGE_PROCESSOR_MAPPING[config_class])
        elif "feature_extractor" in allowed_mappings and config_class in FEATURE_EXTRACTOR_MAPPING:
            found += _as_tuple(FEATURE_EXTRACTOR_MAPPING[config_class])
    # Tokenizer entries may contain `None` (missing slow/fast counterpart) - drop those.
    return tuple(p for p in found if p is not None)
def get_architectures_from_config_class(config_class, arch_mappings, models_to_skip=None):
    """Return a tuple of all possible architectures attributed to `config_class`.

    For example, BertConfig -> (BertModel, BertForMaskedLM, ..., BertForQuestionAnswering).
    An architecture can appear in several of `arch_mappings` (e.g.
    `BartForConditionalGeneration` shows up in the pretraining, LM-head, masked-LM and
    seq2seq mappings); a set keeps each one only once. Architectures whose names are in
    `models_to_skip` or in the global `UNCONVERTIBLE_MODEL_ARCHITECTURES` are excluded.
    """
    skip_names = UNCONVERTIBLE_MODEL_ARCHITECTURES.union(models_to_skip or [])
    collected = set()
    for mapping in arch_mappings:
        if config_class not in mapping:
            continue
        entry = mapping[config_class]
        candidates = tuple(entry) if isinstance(entry, collections.abc.Sequence) else (entry,)
        collected.update(m for m in candidates if m.__name__ not in skip_names)
    return tuple(collected)
def get_config_class_from_processor_class(processor_class):
    """Get the config class whose model type corresponds to `processor_class`.

    Some config/model classes use tokenizers/feature_extractors from other models (e.g.
    `GPT-J` uses `GPT2Tokenizer`). When no checkpoint (or no usable checkpoint) is found
    via the original config class, the config class derived here from the processor's own
    name is used to locate a checkpoint able to build the processor.
    """
    base_name = processor_class.__name__
    # Strip the processor-kind suffix, e.g. `BertTokenizerFast` -> `Bert`.
    for suffix in ["TokenizerFast", "Tokenizer", "ImageProcessor", "FeatureExtractor", "Processor"]:
        base_name = base_name.replace(suffix, "")
    # Special case: `Wav2Vec2CTCTokenizer` -> `Wav2Vec2Config`
    if base_name == "Wav2Vec2CTC":
        base_name = "Wav2Vec2"
    return getattr(transformers_module, f"{base_name}Config")
def build_processor(config_class, processor_class, allow_no_checkpoint=False):
    """Create a processor for `processor_class`.

    If a processor is not able to be built with the original arguments, this method tries to change the arguments and
    call itself recursively, by inferring a new `config_class` or a new `processor_class` from another one, in order to
    find a checkpoint containing the necessary files to build a processor.

    The processor is not saved here. Instead, it will be saved in `convert_processors` after further changes in
    `convert_processors`. For each model architecture`, a copy will be created and saved along the built model.

    Args:
        config_class: Configuration class whose docstring is used to locate a Hub checkpoint.
        processor_class: The tokenizer/image processor/feature extractor/`ProcessorMixin` class to instantiate.
        allow_no_checkpoint: If `True`, image processors and feature extractors may be built with default
            arguments when no checkpoint can be found.

    Returns:
        An instance of `processor_class` (or an Auto-class resolution of it), or `None` if every strategy failed.
    """
    # Currently, this solely uses the docstring in the source file of `config_class` to find a checkpoint.
    checkpoint = get_checkpoint_from_config_class(config_class)
    if checkpoint is None:
        # try to get the checkpoint from the config class for `processor_class`.
        # This helps cases like `XCLIPConfig` and `VideoMAEFeatureExtractor` to find a checkpoint from `VideoMAEConfig`.
        config_class_from_processor_class = get_config_class_from_processor_class(processor_class)
        checkpoint = get_checkpoint_from_config_class(config_class_from_processor_class)
    processor = None
    try:
        # NOTE(review): `checkpoint` may still be `None` here; the resulting failure is caught and logged below.
        processor = processor_class.from_pretrained(checkpoint)
    except Exception as e:
        logger.error(f"{e.__class__.__name__}: {e}")
    # Try to get a new processor class from checkpoint. This is helpful for a checkpoint without necessary file to load
    # processor while `processor_class` is an Auto class. For example, `sew` has `Wav2Vec2Processor` in
    # `PROCESSOR_MAPPING_NAMES`, its `tokenizer_class` is `AutoTokenizer`, and the checkpoint
    # `https://huggingface.co/asapp/sew-tiny-100k` has no tokenizer file, but we can get
    # `tokenizer_class: Wav2Vec2CTCTokenizer` from the config file. (The new processor class won't be able to load from
    # `checkpoint`, but it helps this recursive method to find a way to build a processor).
    if (
        processor is None
        and checkpoint is not None
        and issubclass(processor_class, (PreTrainedTokenizerBase, AutoTokenizer))
    ):
        try:
            config = AutoConfig.from_pretrained(checkpoint)
        except Exception as e:
            logger.error(f"{e.__class__.__name__}: {e}")
            config = None
        if config is not None:
            if not isinstance(config, config_class):
                raise ValueError(
                    f"`config` (which is of type {config.__class__.__name__}) should be an instance of `config_class`"
                    f" ({config_class.__name__})!"
                )
            tokenizer_class = config.tokenizer_class
            new_processor_class = None
            if tokenizer_class is not None:
                new_processor_class = getattr(transformers_module, tokenizer_class)
                if new_processor_class != processor_class:
                    processor = build_processor(config_class, new_processor_class)
            # If `tokenizer_class` is not specified in `config`, let's use `config` to get the process class via auto
            # mappings, but only allow the tokenizer mapping being used. This is to make `Wav2Vec2Conformer` build
            if processor is None:
                new_processor_classes = get_processor_types_from_config_class(
                    config.__class__, allowed_mappings=["tokenizer"]
                )
                # Used to avoid infinite recursion between a pair of fast/slow tokenizer types
                names = [
                    x.__name__.replace("Fast", "") for x in [processor_class, new_processor_class] if x is not None
                ]
                new_processor_classes = [
                    x for x in new_processor_classes if x is not None and x.__name__.replace("Fast", "") not in names
                ]
                if len(new_processor_classes) > 0:
                    new_processor_class = new_processor_classes[0]
                    # Let's use fast tokenizer if there is any
                    for x in new_processor_classes:
                        if x.__name__.endswith("Fast"):
                            new_processor_class = x
                            break
                    processor = build_processor(config_class, new_processor_class)
    if processor is None:
        # Try to build each component (tokenizer & feature extractor) of a `ProcessorMixin`.
        if issubclass(processor_class, ProcessorMixin):
            attrs = {}
            for attr_name in processor_class.attributes:
                attrs[attr_name] = []
                # This could be a tuple (for tokenizers). For example, `CLIPProcessor` has
                # - feature_extractor_class = "CLIPFeatureExtractor"
                # - tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
                attr_class_names = getattr(processor_class, f"{attr_name}_class")
                if not isinstance(attr_class_names, tuple):
                    attr_class_names = (attr_class_names,)
                for name in attr_class_names:
                    attr_class = getattr(transformers_module, name)
                    attr = build_processor(config_class, attr_class)
                    if attr is not None:
                        attrs[attr_name].append(attr)
            # try to build a `ProcessorMixin`, so we can return a single value
            if all(len(v) > 0 for v in attrs.values()):
                try:
                    processor = processor_class(**{k: v[0] for k, v in attrs.items()})
                except Exception as e:
                    logger.error(f"{e.__class__.__name__}: {e}")
        else:
            # `checkpoint` might lack some file(s) to load a processor. For example, `facebook/hubert-base-ls960`
            # has no tokenizer file to load `Wav2Vec2CTCTokenizer`. In this case, we try to build a processor
            # with the configuration class (for example, `Wav2Vec2Config`) corresponding to `processor_class`.
            config_class_from_processor_class = get_config_class_from_processor_class(processor_class)
            if config_class_from_processor_class != config_class:
                processor = build_processor(config_class_from_processor_class, processor_class)
    # Try to create an image processor or a feature extractor without any checkpoint
    if (
        processor is None
        and allow_no_checkpoint
        and (issubclass(processor_class, BaseImageProcessor) or issubclass(processor_class, FeatureExtractionMixin))
    ):
        try:
            processor = processor_class()
        except Exception as e:
            logger.error(f"{e.__class__.__name__}: {e}")
    # validation
    if processor is not None:
        if not (isinstance(processor, processor_class) or processor_class.__name__.startswith("Auto")):
            raise ValueError(
                f"`processor` (which is of type {processor.__class__.__name__}) should be an instance of"
                f" {processor_class.__name__} or an Auto class!"
            )
    return processor
def get_tiny_config(config_class, model_class=None, **model_tester_kwargs):
    """Retrieve a tiny configuration from `config_class` using the model's `ModelTester` class.

    Args:
        config_class: Subclass of `PreTrainedConfig`.
        model_class: If provided, restrict the search to the tester classes of this architecture.
        model_tester_kwargs: Extra keyword arguments forwarded to the model tester's constructor.

    Returns:
        An instance of `config_class` with tiny hyperparameters.

    Raises:
        ValueError: If the testing module, a model tester, or a config-producing method cannot be found.
    """
    model_type = config_class.model_type
    # `model_type` alone cannot always be mapped to the configuration file name (e.g. `data2vec-vision`
    # lives in `configuration_data2vec_vision.py`), so derive it from the actual source file path.
    config_source_file = inspect.getsourcefile(config_class)
    # Strip the `configuration_` prefix and the `.py` suffix to get the bare modeling name.
    modeling_name = config_source_file.split(os.path.sep)[-1].replace("configuration_", "").replace(".py", "")
    try:
        print("Importing", model_type_to_module_name(model_type))
        module_name = model_type_to_module_name(model_type)
        if not modeling_name.startswith(module_name):
            raise ValueError(f"{modeling_name} doesn't start with {module_name}!")
        test_file = os.path.join("tests", "models", module_name, f"test_modeling_{modeling_name}.py")
        models_to_model_testers = get_model_to_tester_mapping(test_file)
        model_tester_class = None
        if model_class is not None:
            tester_classes = get_tester_classes_for_model(test_file, model_class)
        else:
            tester_classes = []
            for candidate_testers in models_to_model_testers.values():
                tester_classes.extend(candidate_testers)
        if tester_classes:
            # Pick the tester with the shortest (then alphabetically first) name. This avoids e.g.
            # `T5EncoderOnlyModelTest` being chosen over `T5ModelTest`: the former has
            # `is_encoder_decoder=False` and causes some pipeline tests failing (also failures in `Optimum` CI).
            # TODO: More fine grained control of the desired tester class.
            model_tester_class = min(tester_classes, key=lambda cls: (len(cls.__name__), cls.__name__))
    except ModuleNotFoundError:
        error = f"Tiny config not created for {model_type} - cannot find the testing module from the model name."
        raise ValueError(error)
    if model_tester_class is None:
        raise ValueError(
            f"Tiny config not created for {model_type} - no model tester is found in the testing module."
        )
    # The tester normally receives a `unittest.TestCase` as `parent`, but it is unused here.
    model_tester = model_tester_class(parent=None, **model_tester_kwargs)
    if hasattr(model_tester, "get_pipeline_config"):
        return model_tester.get_pipeline_config()
    if hasattr(model_tester, "prepare_config_and_inputs"):
        # `prepare_config_and_inputs` is preferred over `get_config`: some testers (e.g. `PoolFormer`)
        # only define the former, and it might apply extra changes on top of `get_config`.
        return model_tester.prepare_config_and_inputs()[0]
    if hasattr(model_tester, "get_config"):
        return model_tester.get_config()
    raise ValueError(
        f"Tiny config not created for {model_type} - the model tester {model_tester_class.__name__} lacks"
        " necessary method to create config."
    )
def convert_tokenizer(tokenizer_fast: PreTrainedTokenizerFast):
    """Retrain `tokenizer_fast` on the tiny training split to shrink its vocabulary to `TARGET_VOCAB_SIZE`."""
    retrained = tokenizer_fast.train_new_from_iterator(
        data["training_ds"]["text"], TARGET_VOCAB_SIZE, show_progress=False
    )
    # Smoke-test the retrained tokenizer on the testing split (skipped for LayoutLMv3,
    # whose fast tokenizer is called differently).
    if not isinstance(retrained, LayoutLMv3TokenizerFast):
        retrained(data["testing_ds"]["text"])
    return retrained
def convert_feature_extractor(feature_extractor, tiny_config):
    """Rebuild `feature_extractor` with sizes taken from `tiny_config`, when available.

    Image sizes come from a top-level `image_size` or, failing that, from the vision
    sub-config; `input_feat_per_channel` (Speech2TextModel specific) drives the audio
    feature sizes. When no relevant attribute exists, the extractor is returned untouched.
    """
    sentinel = object()
    # Prefer a top-level `image_size`; otherwise fall back to the vision sub-config.
    image_size = getattr(tiny_config, "image_size", sentinel)
    if image_size is sentinel:
        vision_cfg = getattr(tiny_config, "vision_config", None)
        if vision_cfg is not None:
            image_size = getattr(vision_cfg, "image_size", sentinel)
    new_kwargs = {}
    if image_size is not sentinel:
        new_kwargs["size"] = image_size
        new_kwargs["crop_size"] = image_size
    # Speech2TextModel specific.
    feat_size = getattr(tiny_config, "input_feat_per_channel", sentinel)
    if feat_size is not sentinel:
        new_kwargs["feature_size"] = feat_size
        new_kwargs["num_mel_bins"] = feat_size
    if new_kwargs:
        feature_extractor = feature_extractor.__class__(**new_kwargs)
    return feature_extractor
def convert_processors(processors, tiny_config, output_folder, result):
    """Change a processor to work with smaller inputs.

    For tokenizers, we try to reduce their vocabulary size.
    For feature extractor, we use smaller image size or change
    other attributes using the values from `tiny_config`. See `convert_feature_extractor`.

    This method should not fail: we catch the errors and put them in `result["warnings"]` with descriptive messages.

    Args:
        processors: The processors built for this model type (tokenizers and/or feature extractors).
        tiny_config: The tiny configuration whose attributes drive the conversion.
        output_folder: Directory where the converted processors are saved at the end.
        result: Mutable report dict; conversion problems are appended to `result["warnings"]`.

    Returns:
        The list of (converted) processors that could be kept and saved.
    """
    def _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False):
        """Set tokenizer(s) to `None` if the fast/slow tokenizers have different values for `vocab_size` or `length`.

        If `keep_fast_tokenizer=True`, the fast tokenizer will be kept.
        """
        # sanity check 1: fast and slow tokenizers should be compatible (vocab_size)
        if fast_tokenizer is not None and slow_tokenizer is not None:
            if fast_tokenizer.vocab_size != slow_tokenizer.vocab_size:
                warning_message = (
                    "The fast/slow tokenizers "
                    f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different "
                    "vocabulary size: "
                    f"fast_tokenizer.vocab_size = {fast_tokenizer.vocab_size} and "
                    f"slow_tokenizer.vocab_size = {slow_tokenizer.vocab_size}."
                )
                result["warnings"].append(warning_message)
                if not keep_fast_tokenizer:
                    fast_tokenizer = None
                slow_tokenizer = None
        # sanity check 2: fast and slow tokenizers should be compatible (length)
        if fast_tokenizer is not None and slow_tokenizer is not None:
            if len(fast_tokenizer) != len(slow_tokenizer):
                # Fix: the message previously had empty parentheses - include the class names as in check 1.
                warning_message = (
                    "The fast/slow tokenizers "
                    f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different "
                    "length: "
                    f"len(fast_tokenizer) = {len(fast_tokenizer)} and "
                    f"len(slow_tokenizer) = {len(slow_tokenizer)}."
                )
                result["warnings"].append(warning_message)
                if not keep_fast_tokenizer:
                    fast_tokenizer = None
                slow_tokenizer = None
        return fast_tokenizer, slow_tokenizer
    tokenizers = []
    feature_extractors = []
    # De-duplicate the processors by class name, unpacking `ProcessorMixin` into its components.
    for processor in processors:
        if isinstance(processor, PreTrainedTokenizerBase):
            if processor.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}:
                tokenizers.append(processor)
        elif isinstance(processor, BaseImageProcessor):
            if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}:
                feature_extractors.append(processor)
        elif isinstance(processor, FeatureExtractionMixin):
            if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}:
                feature_extractors.append(processor)
        elif isinstance(processor, ProcessorMixin):
            if hasattr(processor, "tokenizer"):
                if processor.tokenizer.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}:
                    tokenizers.append(processor.tokenizer)
            # Currently, we only have these 2 possibilities
            if hasattr(processor, "image_processor"):
                if processor.image_processor.__class__.__name__ not in {
                    x.__class__.__name__ for x in feature_extractors
                }:
                    feature_extractors.append(processor.image_processor)
            elif hasattr(processor, "feature_extractor"):
                if processor.feature_extractor.__class__.__name__ not in {
                    x.__class__.__name__ for x in feature_extractors
                }:
                    feature_extractors.append(processor.feature_extractor)
    # check the built processors have the unique type
    num_types = len({x.__class__.__name__ for x in feature_extractors})
    if num_types >= 2:
        raise ValueError(f"`feature_extractors` should contain at most 1 type, but it contains {num_types} types!")
    num_types = len({x.__class__.__name__.replace("Fast", "") for x in tokenizers})
    if num_types >= 2:
        raise ValueError(f"`tokenizers` should contain at most 1 tokenizer type, but it contains {num_types} types!")
    fast_tokenizer = None
    slow_tokenizer = None
    for tokenizer in tokenizers:
        if isinstance(tokenizer, PreTrainedTokenizerFast):
            fast_tokenizer = tokenizer
        else:
            slow_tokenizer = tokenizer
    # If the (original) fast/slow tokenizers don't correspond, keep only the fast tokenizer.
    # This doesn't necessarily imply the fast/slow tokenizers in a single Hub repo. has issues.
    # It's more of an issue in `build_processor` which tries to get a checkpoint with as much effort as possible.
    # For `YosoModel` (which uses `AlbertTokenizer(Fast)`), its real (Hub) checkpoint doesn't contain valid files to
    # load the slower tokenizer (`AlbertTokenizer`), and it ends up finding the (canonical) checkpoint of `AlbertModel`,
    # which has different vocabulary.
    # TODO: Try to improve `build_processor`'s definition and/or usage to avoid the above situation in the first place.
    fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=True)
    original_fast_tokenizer, original_slow_tokenizer = fast_tokenizer, slow_tokenizer
    if fast_tokenizer:
        try:
            # Wav2Vec2ForCTC , ByT5Tokenizer etc. all are already small enough and have no fast version that can
            # be retrained
            if fast_tokenizer.vocab_size > TARGET_VOCAB_SIZE:
                fast_tokenizer = convert_tokenizer(fast_tokenizer)
        except Exception:
            result["warnings"].append(
                (
                    f"Failed to convert the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
                    traceback.format_exc(),
                )
            )
    # If `fast_tokenizer` exists, `slow_tokenizer` should correspond to it.
    if fast_tokenizer:
        # Make sure the fast tokenizer can be saved
        try:
            # We don't save it to `output_folder` at this moment - only at the end of this function.
            with tempfile.TemporaryDirectory() as tmpdir:
                fast_tokenizer.save_pretrained(tmpdir)
                try:
                    slow_tokenizer = AutoTokenizer.from_pretrained(tmpdir, use_fast=False)
                except Exception:
                    result["warnings"].append(
                        (
                            f"Failed to load the slow tokenizer saved from {fast_tokenizer.__class__.__name__}.",
                            traceback.format_exc(),
                        )
                    )
                    # Let's just keep the fast version
                    slow_tokenizer = None
        except Exception:
            result["warnings"].append(
                (
                    f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
                    traceback.format_exc(),
                )
            )
            fast_tokenizer = None
    # If the (possibly converted) fast/slow tokenizers don't correspond, set them to `None`, and use the original
    # tokenizers.
    fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False)
    # If there is any conversion failed, we keep the original tokenizers.
    if (original_fast_tokenizer is not None and fast_tokenizer is None) or (
        original_slow_tokenizer is not None and slow_tokenizer is None
    ):
        warning_message = (
            "There are some issues when converting the fast/slow tokenizers. The original tokenizers from the Hub "
            " will be used instead."
        )
        result["warnings"].append(warning_message)
        # Let's use the original version at the end (`original_fast_tokenizer` and `original_slow_tokenizer`)
        fast_tokenizer = original_fast_tokenizer
        slow_tokenizer = original_slow_tokenizer
    # Make sure the fast tokenizer can be saved
    if fast_tokenizer:
        # We don't save it to `output_folder` at this moment - only at the end of this function.
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                fast_tokenizer.save_pretrained(tmpdir)
            except Exception:
                result["warnings"].append(
                    (
                        f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
                        traceback.format_exc(),
                    )
                )
                fast_tokenizer = None
    # Make sure the slow tokenizer can be saved
    if slow_tokenizer:
        # We don't save it to `output_folder` at this moment - only at the end of this function.
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                slow_tokenizer.save_pretrained(tmpdir)
            except Exception:
                result["warnings"].append(
                    (
                        f"Failed to save the slow tokenizer for {slow_tokenizer.__class__.__name__}.",
                        traceback.format_exc(),
                    )
                )
                slow_tokenizer = None
    # update feature extractors using the tiny config
    try:
        feature_extractors = [convert_feature_extractor(p, tiny_config) for p in feature_extractors]
    except Exception:
        result["warnings"].append(
            (
                "Failed to convert feature extractors.",
                traceback.format_exc(),
            )
        )
        feature_extractors = []
    # Cap `model_max_length` at the model's maximum positions (minus 2 for tokenizers that
    # reserve positions for special tokens, like RoBERTa-style ones).
    if hasattr(tiny_config, "max_position_embeddings") and tiny_config.max_position_embeddings > 0:
        if fast_tokenizer is not None:
            if fast_tokenizer.__class__.__name__ in [
                "RobertaTokenizerFast",
                "XLMRobertaTokenizerFast",
                "LongformerTokenizerFast",
                "MPNetTokenizerFast",
            ]:
                fast_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2
            else:
                fast_tokenizer.model_max_length = tiny_config.max_position_embeddings
        if slow_tokenizer is not None:
            if slow_tokenizer.__class__.__name__ in [
                "RobertaTokenizer",
                "XLMRobertaTokenizer",
                "LongformerTokenizer",
                "MPNetTokenizer",
            ]:
                slow_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2
            else:
                slow_tokenizer.model_max_length = tiny_config.max_position_embeddings
    processors = [fast_tokenizer, slow_tokenizer] + feature_extractors
    processors = [p for p in processors if p is not None]
    for p in processors:
        p.save_pretrained(output_folder)
    return processors
def get_checkpoint_dir(output_dir, model_arch):
    """Return the framework-agnostic checkpoint directory for `model_arch`.

    PT/TF/Flax variants of the same architecture all map to one folder, so the
    framework prefix ("TF"/"Flax") is stripped from the class name.
    """
    name = model_arch.__name__
    for prefix in ("TF", "Flax"):
        if name.startswith(prefix):
            name = name[len(prefix):]
            break
    return os.path.join(output_dir, name)
def build_model(model_arch, tiny_config, output_dir):
    """Create and save a model for `model_arch`.
    Also copy the set of processors to each model (under the same model type) output folder.
    """
    ckpt_dir = get_checkpoint_dir(output_dir, model_arch)
    # The processors for this model type were saved in a shared folder: copy them next to the checkpoint.
    shared_processor_dir = os.path.join(output_dir, "processors")
    if os.path.isdir(shared_processor_dir):
        shutil.copytree(shared_processor_dir, ckpt_dir, dirs_exist_ok=True)
    # Work on a copy so the caller's config is not mutated across architectures.
    config = copy.deepcopy(tiny_config)
    if model_arch.__name__.endswith(("ForCausalLM", "LMHeadModel")):
        config.is_encoder_decoder = False
        config.is_decoder = True
    model = model_arch(config=config)
    model.save_pretrained(ckpt_dir)
    # Reload once as a sanity check that the saved checkpoint is loadable.
    model.from_pretrained(ckpt_dir)
    return model
def fill_result_with_error(result, error, trace, models_to_create):
    """Fill `result` with errors for all target model arch if we can't build processor"""
    failure = (error, trace)
    result["error"] = failure
    # Mark every requested architecture, in every framework, as failed with the same (error, trace) pair.
    for framework in FRAMEWORKS:
        if framework not in models_to_create:
            continue
        result[framework] = {
            arch.__name__: {"model": None, "checkpoint": None, "error": failure}
            for arch in models_to_create[framework]
        }
    # Keep only the class names of the processors built so far (instances are not JSON-serializable).
    result["processor"] = {p.__class__.__name__: p.__class__.__name__ for p in result["processor"].values()}
def upload_model(model_dir, organization, token):
    """Upload the tiny model saved in `model_dir` to the Hub under `organization`.
    If the repository already exists on the Hub, the new files are proposed through a pull request instead of being
    pushed directly.
    """
    arch_name = model_dir.split(os.path.sep)[-1]
    repo_name = f"tiny-random-{arch_name}"
    repo_id = f"{organization}/{repo_name}"
    repo_exist = False
    error = None
    try:
        # `exist_ok=False` on purpose: an exception here is how we detect that the repo already exists.
        create_repo(repo_id=repo_id, exist_ok=False, repo_type="model", token=token)
    except Exception as e:
        error = e
        if "You already created" in str(e):
            error = None
            logger.warning("Remote repository exists and will be cloned.")
            repo_exist = True
            try:
                create_repo(repo_id=repo_id, exist_ok=True, repo_type="model", token=token)
            except Exception as e:
                error = e
    if error is not None:
        raise error
    with tempfile.TemporaryDirectory() as tmpdir:
        repo = Repository(local_dir=tmpdir, clone_from=repo_id, token=token)
        repo.git_pull()
        shutil.copytree(model_dir, tmpdir, dirs_exist_ok=True)
        if repo_exist:
            # Open a PR on the existing Hub repo.
            hub_pr_url = upload_folder(
                folder_path=model_dir,
                repo_id=repo_id,
                repo_type="model",
                commit_message=f"Update tiny models for {arch_name}",
                commit_description=f"Upload tiny models for {arch_name}",
                create_pr=True,
                token=token,
            )
            logger.warning(f"PR open in {hub_pr_url}.")
            # TODO: We need this information?
        else:
            # Push to Hub repo directly
            repo.git_add(auto_lfs_track=True)
            repo.git_commit(f"Upload tiny models for {arch_name}")
            repo.git_push(blocking=True)  # this prints a progress bar with the upload
            logger.warning(f"Tiny models {arch_name} pushed to {repo_id}.")
def build_composite_models(config_class, output_dir):
    """Build a tiny composite model (encoder-decoder like architectures) and save it under `output_dir`.
    The encoder and the decoder are themselves tiny models built recursively through `build`, then assembled with the
    composite model class (e.g. `EncoderDecoderModel`). Returns a `result` dict with the same structure as `build`.
    """
    import tempfile
    from transformers import (
        BertConfig,
        BertLMHeadModel,
        BertModel,
        BertTokenizer,
        BertTokenizerFast,
        EncoderDecoderModel,
        GPT2Config,
        GPT2LMHeadModel,
        GPT2Tokenizer,
        GPT2TokenizerFast,
        SpeechEncoderDecoderModel,
        TFEncoderDecoderModel,
        TFVisionEncoderDecoderModel,
        TFVisionTextDualEncoderModel,
        VisionEncoderDecoderModel,
        VisionTextDualEncoderModel,
        ViTConfig,
        ViTFeatureExtractor,
        ViTModel,
        Wav2Vec2Config,
        Wav2Vec2Model,
        Wav2Vec2Processor,
    )
    # These will be removed at the end if they are empty
    result = {"error": None, "warnings": []}
    # Pick the encoder/decoder building blocks (config, processor and model classes) for each composite model type.
    if config_class.model_type == "encoder-decoder":
        encoder_config_class = BertConfig
        decoder_config_class = BertConfig
        encoder_processor = (BertTokenizerFast, BertTokenizer)
        decoder_processor = (BertTokenizerFast, BertTokenizer)
        encoder_class = BertModel
        decoder_class = BertLMHeadModel
        model_class = EncoderDecoderModel
        tf_model_class = TFEncoderDecoderModel
    elif config_class.model_type == "vision-encoder-decoder":
        encoder_config_class = ViTConfig
        decoder_config_class = GPT2Config
        encoder_processor = (ViTFeatureExtractor,)
        decoder_processor = (GPT2TokenizerFast, GPT2Tokenizer)
        encoder_class = ViTModel
        decoder_class = GPT2LMHeadModel
        model_class = VisionEncoderDecoderModel
        tf_model_class = TFVisionEncoderDecoderModel
    elif config_class.model_type == "speech-encoder-decoder":
        encoder_config_class = Wav2Vec2Config
        decoder_config_class = BertConfig
        encoder_processor = (Wav2Vec2Processor,)
        decoder_processor = (BertTokenizerFast, BertTokenizer)
        encoder_class = Wav2Vec2Model
        decoder_class = BertLMHeadModel
        model_class = SpeechEncoderDecoderModel
        tf_model_class = None
    elif config_class.model_type == "vision-text-dual-encoder":
        # Not encoder-decoder, but encoder-encoder. We just keep the same name as above to make code easier
        encoder_config_class = ViTConfig
        decoder_config_class = BertConfig
        encoder_processor = (ViTFeatureExtractor,)
        decoder_processor = (BertTokenizerFast, BertTokenizer)
        encoder_class = ViTModel
        decoder_class = BertModel
        model_class = VisionTextDualEncoderModel
        tf_model_class = TFVisionTextDualEncoderModel
    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            # build encoder
            models_to_create = {"processor": encoder_processor, "pytorch": (encoder_class,), "tensorflow": []}
            encoder_output_dir = os.path.join(tmpdir, "encoder")
            build(encoder_config_class, models_to_create, encoder_output_dir)
            # build decoder
            models_to_create = {"processor": decoder_processor, "pytorch": (decoder_class,), "tensorflow": []}
            decoder_output_dir = os.path.join(tmpdir, "decoder")
            build(decoder_config_class, models_to_create, decoder_output_dir)
            # build encoder-decoder
            encoder_path = os.path.join(encoder_output_dir, encoder_class.__name__)
            decoder_path = os.path.join(decoder_output_dir, decoder_class.__name__)
            if config_class.model_type != "vision-text-dual-encoder":
                # Specify these explicitly for encoder-decoder like models, but not for `vision-text-dual-encoder` as it
                # has no decoder.
                decoder_config = decoder_config_class.from_pretrained(decoder_path)
                decoder_config.is_decoder = True
                decoder_config.add_cross_attention = True
                model = model_class.from_encoder_decoder_pretrained(
                    encoder_path,
                    decoder_path,
                    decoder_config=decoder_config,
                )
            elif config_class.model_type == "vision-text-dual-encoder":
                model = model_class.from_vision_text_pretrained(encoder_path, decoder_path)
            model_path = os.path.join(
                output_dir,
                f"{model_class.__name__}-{encoder_config_class.model_type}-{decoder_config_class.model_type}",
            )
            model.save_pretrained(model_path)
            if tf_model_class is not None:
                # Also save a TensorFlow version, converted from the PyTorch weights, to the same folder.
                model = tf_model_class.from_pretrained(model_path, from_pt=True)
                model.save_pretrained(model_path)
            # copy the processors
            encoder_processor_path = os.path.join(encoder_output_dir, "processors")
            decoder_processor_path = os.path.join(decoder_output_dir, "processors")
            if os.path.isdir(encoder_processor_path):
                shutil.copytree(encoder_processor_path, model_path, dirs_exist_ok=True)
            if os.path.isdir(decoder_processor_path):
                shutil.copytree(decoder_processor_path, model_path, dirs_exist_ok=True)
            # fill `result`
            result["processor"] = {x.__name__: x.__name__ for x in encoder_processor + decoder_processor}
            result["pytorch"] = {model_class.__name__: {"model": model_class.__name__, "checkpoint": model_path}}
            result["tensorflow"] = {}
            if tf_model_class is not None:
                result["tensorflow"] = {
                    tf_model_class.__name__: {"model": tf_model_class.__name__, "checkpoint": model_path}
                }
        except Exception:
            result["error"] = (
                f"Failed to build models for {config_class.__name__}.",
                traceback.format_exc(),
            )
    if not result["error"]:
        del result["error"]
    if not result["warnings"]:
        del result["warnings"]
    return result
def get_token_id_from_tokenizer(token_id_name, tokenizer, original_token_id):
    """Use `tokenizer` to get the values of `bos_token_id`, `eos_token_ids`, etc.
    The argument `token_id_name` should be a string ending with `_token_id`, and `original_token_id` should be an
    integer that will be returned if `tokenizer` has no token corresponding to `token_id_name`.
    """
    if not token_id_name.endswith("_token_id"):
        raise ValueError(f"`token_id_name` is {token_id_name}, which doesn't end with `_token_id`!")
    # e.g. `bos_token_id` -> look up the tokenizer's `bos_token` attribute.
    token = getattr(tokenizer, token_id_name.replace("_token_id", "_token"), None)
    if token is None:
        return original_token_id
    if isinstance(tokenizer, PreTrainedTokenizerFast):
        return tokenizer._convert_token_to_id_with_added_voc(token)
    return tokenizer._convert_token_to_id(token)
def get_config_overrides(config_class, processors):
    """Compute the config attribute overrides (vocab size, special token ids, etc.) needed so that the tiny config of
    `config_class` stays consistent with the (converted, smaller) tokenizer found in `processors`.
    Returns a dict mapping config attribute names to their new values; empty if no tokenizer is available.
    """
    config_overrides = {}
    # Check if there is any tokenizer (prefer fast version if any)
    tokenizer = None
    for processor in processors:
        if isinstance(processor, PreTrainedTokenizerFast):
            tokenizer = processor
            break
        elif isinstance(processor, PreTrainedTokenizer):
            tokenizer = processor
    if tokenizer is None:
        return config_overrides
    # Get some properties of the (already converted) tokenizer (smaller vocab size, special token ids, etc.)
    # We use `len(tokenizer)` instead of `tokenizer.vocab_size` to avoid potential issues for tokenizers with non-empty
    # `added_tokens_encoder`. One example is the `DebertaV2Tokenizer` where the mask token is the extra token.
    vocab_size = len(tokenizer)
    # The original checkpoint has length `35998`, but it doesn't have ids `30400` and `30514` but instead `35998` and
    # `35999`.
    if config_class.__name__ == "GPTSanJapaneseConfig":
        vocab_size += 2
    config_overrides["vocab_size"] = vocab_size
    # Used to create a new model tester with `tokenizer.vocab_size` in order to get the (updated) special token ids.
    model_tester_kwargs = {"vocab_size": vocab_size}
    # CLIP-like models have `text_model_tester` and `vision_model_tester`, and we need to pass `vocab_size` to
    # `text_model_tester` via `text_kwargs`. The same trick is also necessary for `Flava`.
    if config_class.__name__ in [
        "AlignConfig",
        "AltCLIPConfig",
        "ChineseCLIPConfig",
        "CLIPSegConfig",
        "ClapConfig",
        "CLIPConfig",
        "GroupViTConfig",
        "OwlViTConfig",
        "XCLIPConfig",
        "FlavaConfig",
        "BlipConfig",
        "Blip2Config",
    ]:
        del model_tester_kwargs["vocab_size"]
        model_tester_kwargs["text_kwargs"] = {"vocab_size": vocab_size}
    # `FSMTModelTester` accepts `src_vocab_size` and `tgt_vocab_size` but not `vocab_size`.
    elif config_class.__name__ == "FSMTConfig":
        del model_tester_kwargs["vocab_size"]
        model_tester_kwargs["src_vocab_size"] = tokenizer.src_vocab_size
        model_tester_kwargs["tgt_vocab_size"] = tokenizer.tgt_vocab_size
    _tiny_config = get_tiny_config(config_class, **model_tester_kwargs)
    # handle the possibility of `text_config` inside `_tiny_config` for clip-like models (`owlvit`, `groupvit`, etc.)
    if hasattr(_tiny_config, "text_config"):
        _tiny_config = _tiny_config.text_config
    # Collect values of some special token ids
    for attr in dir(_tiny_config):
        if attr.endswith("_token_id"):
            token_id = getattr(_tiny_config, attr)
            if token_id is not None:
                # Using the token id values from `tokenizer` instead of from `_tiny_config`.
                token_id = get_token_id_from_tokenizer(attr, tokenizer, original_token_id=token_id)
                config_overrides[attr] = token_id
    if config_class.__name__ == "FSMTConfig":
        config_overrides["src_vocab_size"] = tokenizer.src_vocab_size
        config_overrides["tgt_vocab_size"] = tokenizer.tgt_vocab_size
        # `FSMTConfig` has `DecoderConfig` as `decoder` attribute.
        config_overrides["decoder"] = configuration_fsmt.DecoderConfig(
            vocab_size=tokenizer.tgt_vocab_size, bos_token_id=config_overrides["eos_token_id"]
        )
    return config_overrides
def build(config_class, models_to_create, output_dir):
    """Create all models for a certain model type.
    Args:
        config_class (`PretrainedConfig`):
            A subclass of `PretrainedConfig` that is used to determine `models_to_create`.
        models_to_create (`dict`):
            A dictionary containing the processor/model classes that we want to create the instances. These models are
            of the same model type which is associated to `config_class`.
        output_dir (`str`):
            The directory to save all the checkpoints. Each model architecture will be saved in a subdirectory under
            it. Models in different frameworks with the same architecture will be saved in the same subdirectory.
    """
    # NOTE: a stray block that called `load_dataset("wikitext", ...)` on an undefined `data` dict was removed here: it
    # referenced names that don't exist in this module (guaranteed `NameError`) and is unrelated to model building.
    # Composite models are assembled from two standalone models and are handled by a dedicated builder.
    if config_class.model_type in [
        "encoder-decoder",
        "vision-encoder-decoder",
        "speech-encoder-decoder",
        "vision-text-dual-encoder",
    ]:
        return build_composite_models(config_class, output_dir)
    result = {k: {} for k in models_to_create}
    # These will be removed at the end if they are empty
    result["error"] = None
    result["warnings"] = []
    # Build processors
    processor_classes = models_to_create["processor"]
    if len(processor_classes) == 0:
        error = f"No processor class could be found in {config_class.__name__}."
        fill_result_with_error(result, error, None, models_to_create)
        logger.error(result["error"][0])
        return result
    for processor_class in processor_classes:
        try:
            processor = build_processor(config_class, processor_class, allow_no_checkpoint=True)
            if processor is not None:
                result["processor"][processor_class] = processor
        except Exception:
            error = f"Failed to build processor for {processor_class.__name__}."
            trace = traceback.format_exc()
            fill_result_with_error(result, error, trace, models_to_create)
            logger.error(result["error"][0])
            return result
    if len(result["processor"]) == 0:
        error = f"No processor could be built for {config_class.__name__}."
        fill_result_with_error(result, error, None, models_to_create)
        logger.error(result["error"][0])
        return result
    try:
        tiny_config = get_tiny_config(config_class)
    except Exception as e:
        error = f"Failed to get tiny config for {config_class.__name__}: {e}"
        trace = traceback.format_exc()
        fill_result_with_error(result, error, trace, models_to_create)
        logger.error(result["error"][0])
        return result
    # Convert the processors (reduce vocabulary size, smaller image size, etc.)
    processors = list(result["processor"].values())
    processor_output_folder = os.path.join(output_dir, "processors")
    try:
        processors = convert_processors(processors, tiny_config, processor_output_folder, result)
    except Exception:
        # Conversion failure is not fatal: keep going with the unconverted processors.
        error = "Failed to convert the processors."
        trace = traceback.format_exc()
        result["warnings"].append((error, trace))
    if len(processors) == 0:
        error = f"No processor is returned by `convert_processors` for {config_class.__name__}."
        fill_result_with_error(result, error, None, models_to_create)
        logger.error(result["error"][0])
        return result
    try:
        config_overrides = get_config_overrides(config_class, processors)
    except Exception as e:
        error = f"Failure occurs while calling `get_config_overrides`: {e}"
        trace = traceback.format_exc()
        fill_result_with_error(result, error, trace, models_to_create)
        logger.error(result["error"][0])
        return result
    # Just for us to see this easily in the report
    if "vocab_size" in config_overrides:
        result["vocab_size"] = config_overrides["vocab_size"]
    # Update attributes that `vocab_size` involves
    for k, v in config_overrides.items():
        if hasattr(tiny_config, k):
            setattr(tiny_config, k, v)
        # So far, we only have to deal with `text_config`, as `config_overrides` contains text-related attributes only.
        elif (
            hasattr(tiny_config, "text_config")
            and tiny_config.text_config is not None
            and hasattr(tiny_config.text_config, k)
        ):
            setattr(tiny_config.text_config, k, v)
            # If `text_config_dict` exists, we need to update its value here too in order to make
            # `save_pretrained -> from_pretrained` work.
            if hasattr(tiny_config, "text_config_dict"):
                tiny_config.text_config_dict[k] = v
    if result["warnings"]:
        logger.warning(result["warnings"][0][0])
    # update `result["processor"]` to hold class names only (instances are not JSON-serializable)
    result["processor"] = {type(p).__name__: p.__class__.__name__ for p in processors}
    for pytorch_arch in models_to_create["pytorch"]:
        result["pytorch"][pytorch_arch.__name__] = {}
        error = None
        try:
            model = build_model(pytorch_arch, tiny_config, output_dir=output_dir)
        except Exception as e:
            model = None
            error = f"Failed to create the pytorch model for {pytorch_arch}: {e}"
            trace = traceback.format_exc()
        result["pytorch"][pytorch_arch.__name__]["model"] = model.__class__.__name__ if model is not None else None
        result["pytorch"][pytorch_arch.__name__]["checkpoint"] = (
            get_checkpoint_dir(output_dir, pytorch_arch) if model is not None else None
        )
        if error is not None:
            result["pytorch"][pytorch_arch.__name__]["error"] = (error, trace)
            logger.error(f"{pytorch_arch.__name__}: {error}")
    for tensorflow_arch in models_to_create["tensorflow"]:
        # Make PT/TF weights compatible
        pt_arch_name = tensorflow_arch.__name__[2:]  # Remove `TF`
        pt_arch = getattr(transformers_module, pt_arch_name)
        result["tensorflow"][tensorflow_arch.__name__] = {}
        error = None
        if pt_arch.__name__ in result["pytorch"] and result["pytorch"][pt_arch.__name__]["checkpoint"] is not None:
            ckpt = get_checkpoint_dir(output_dir, pt_arch)
            # Use the same weights from PyTorch.
            try:
                model = tensorflow_arch.from_pretrained(ckpt, from_pt=True)
                model.save_pretrained(ckpt)
            except Exception as e:
                # Conversion may fail. Let's not create a model with different weights to avoid confusion (for now).
                model = None
                error = f"Failed to convert the pytorch model to the tensorflow model for {pt_arch}: {e}"
                trace = traceback.format_exc()
        else:
            try:
                model = build_model(tensorflow_arch, tiny_config, output_dir=output_dir)
            except Exception as e:
                model = None
                error = f"Failed to create the tensorflow model for {tensorflow_arch}: {e}"
                trace = traceback.format_exc()
        result["tensorflow"][tensorflow_arch.__name__]["model"] = (
            model.__class__.__name__ if model is not None else None
        )
        result["tensorflow"][tensorflow_arch.__name__]["checkpoint"] = (
            get_checkpoint_dir(output_dir, tensorflow_arch) if model is not None else None
        )
        if error is not None:
            result["tensorflow"][tensorflow_arch.__name__]["error"] = (error, trace)
            logger.error(f"{tensorflow_arch.__name__}: {error}")
    if not result["error"]:
        del result["error"]
    if not result["warnings"]:
        del result["warnings"]
    return result
def build_tiny_model_summary(results, organization=None, token=None):
    """Build a summary: a dictionary of the form
    {
        model architecture name:
            {
                "tokenizer_classes": [...],
                "processor_classes": [...],
                "model_classes": [...],
            }
        ..
    }
    If `organization` is provided, a `"sha"` entry (the commit hash of the corresponding Hub repository) is added to
    each architecture entry; architectures whose repository info can't be fetched are dropped from the summary.
    """
    tiny_model_summary = {}
    for config_name in results:
        # Only the class names (keys) are needed here - no reason to iterate over `.items()`.
        processors = list(results[config_name]["processor"])
        tokenizer_classes = sorted(x for x in processors if x.endswith(("TokenizerFast", "Tokenizer")))
        processor_classes = sorted(x for x in processors if x not in tokenizer_classes)
        for framework in FRAMEWORKS:
            if framework not in results[config_name]:
                continue
            for arch_name in results[config_name][framework]:
                model_classes = [arch_name]
                base_arch_name = arch_name[2:] if arch_name.startswith("TF") else arch_name
                # tiny model is not created for `arch_name`
                if results[config_name][framework][arch_name]["model"] is None:
                    model_classes = []
                if base_arch_name not in tiny_model_summary:
                    tiny_model_summary[base_arch_name] = {}
                tiny_model_summary[base_arch_name].update(
                    {
                        "tokenizer_classes": tokenizer_classes,
                        "processor_classes": processor_classes,
                    }
                )
                # Merge the PT/TF class names for the same base architecture into one sorted list.
                tiny_model_summary[base_arch_name]["model_classes"] = sorted(
                    tiny_model_summary[base_arch_name].get("model_classes", []) + model_classes
                )
                if organization is not None:
                    repo_name = f"tiny-random-{base_arch_name}"
                    # composite models' checkpoints have more precise repo. names on the Hub.
                    if base_arch_name in COMPOSITE_MODELS:
                        repo_name = f"tiny-random-{COMPOSITE_MODELS[base_arch_name]}"
                    repo_id = f"{organization}/{repo_name}"
                    try:
                        commit_hash = hf_api.repo_info(repo_id, token=token).sha
                    except Exception:
                        # The directory is not created, but processor(s) is/are included in `results`.
                        logger.warning(f"Failed to get information for {repo_id}.\n{traceback.format_exc()}")
                        del tiny_model_summary[base_arch_name]
                        continue
                    tiny_model_summary[base_arch_name]["sha"] = commit_hash
    return tiny_model_summary
def build_failed_report(results, include_warning=True):
    """Extract, per model type, the recorded errors (and optionally warnings) from a `build` results dict."""
    failed_results = {}
    for config_name, config_result in results.items():
        # Top-level (processor/config) error for this model type.
        if "error" in config_result:
            failed_results[config_name] = {"error": config_result["error"]}
        if include_warning and "warnings" in config_result:
            failed_results.setdefault(config_name, {})["warnings"] = config_result["warnings"]
        # Per-architecture errors, grouped by framework.
        for framework in FRAMEWORKS:
            if framework not in config_result:
                continue
            for arch_name, arch_result in config_result[framework].items():
                if "error" not in arch_result:
                    continue
                per_config = failed_results.setdefault(config_name, {})
                per_framework = per_config.setdefault(framework, {})
                per_framework.setdefault(arch_name, {})["error"] = arch_result["error"]
    return failed_results
def build_simple_report(results):
    """Build two plain-text reports with one `{architecture}: {message}` line per model architecture.
    The first report covers every architecture ("OK" or the error message); the second only the failed ones.
    """
    text = ""
    failed_text = ""
    for config_name in results:
        for framework in FRAMEWORKS:
            if framework not in results[config_name]:
                continue
            for arch_name, arch_result in results[config_name][framework].items():
                if "error" in arch_result:
                    message = arch_result["error"][0]
                    failed_text += f"{arch_name}: {message}\n"
                else:
                    message = "OK"
                # The full report lists every architecture, failed or not.
                text += f"{arch_name}: {message}\n"
    return text, failed_text
def update_tiny_model_summary_file(report_path):
    """Merge the newly generated summary (in `report_path`) into `tests/utils/tiny_model_summary.json` and write the
    merged, deduplicated and sorted result to `updated_tiny_model_summary.json` under `report_path`.
    """
    with open(os.path.join(report_path, "tiny_model_summary.json")) as fp:
        new_data = json.load(fp)
    with open("tests/utils/tiny_model_summary.json") as fp:
        data = json.load(fp)
    for key, value in new_data.items():
        if key not in data:
            data[key] = value
            continue
        # we might get duplication here. We will remove them below when creating `updated_data`.
        for attr in ["tokenizer_classes", "processor_classes", "model_classes"]:
            data[key][attr].extend(value[attr])
        new_sha = value.get("sha")
        if new_sha is not None:
            data[key]["sha"] = new_sha
    # deduplicate and sort every class list; keep `sha` as-is; sort the architectures by name.
    updated_data = {
        key: {attr: (sorted(set(v)) if attr != "sha" else v) for attr, v in data[key].items()}
        for key in sorted(data)
    }
    with open(os.path.join(report_path, "updated_tiny_model_summary.json"), "w") as fp:
        json.dump(updated_data, fp, indent=4, ensure_ascii=False)
def create_tiny_models(
    output_path,
    all,
    model_types,
    models_to_skip,
    no_check,
    upload,
    organization,
    token,
    num_workers=1,
):
    """Create tiny models for (a subset of) the model types, save reports, and optionally upload to the Hub.
    Args:
        output_path: Directory where the checkpoints and the `reports` subdirectory are written.
        all: If `True`, build tiny models for every entry in `CONFIG_MAPPING` (`model_types` is then ignored).
            (The name shadows the builtin `all` but is kept for backward compatibility with callers.)
        model_types: Model types to build when `all` is `False`.
        models_to_skip: Model class names for which no tiny model should be created.
        no_check: Accepted for CLI compatibility; not used in this function — NOTE(review): confirm intent.
        upload: If `True`, upload the created models (requires `organization`).
        organization: Hub organization to upload under.
        token: Hub authentication token with write access.
        num_workers: Number of processes used to build the models (serial when <= 1).
    """
    clone_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    if os.getcwd() != clone_path:
        raise ValueError(f"This script should be run from the root of the clone of `transformers` {clone_path}")
    report_path = os.path.join(output_path, "reports")
    os.makedirs(report_path)
    # Collect the PT/TF auto-mapping names exposed by the `transformers` module.
    _pytorch_arch_mappings = [
        x
        for x in dir(transformers_module)
        if x.startswith("MODEL_") and x.endswith("_MAPPING") and x != "MODEL_NAMES_MAPPING"
    ]
    _tensorflow_arch_mappings = [
        x for x in dir(transformers_module) if x.startswith("TF_MODEL_") and x.endswith("_MAPPING")
    ]
    pytorch_arch_mappings = [getattr(transformers_module, x) for x in _pytorch_arch_mappings]
    tensorflow_arch_mappings = [getattr(transformers_module, x) for x in _tensorflow_arch_mappings]
    config_classes = CONFIG_MAPPING.values()
    if not all:
        config_classes = [CONFIG_MAPPING[model_type] for model_type in model_types]
    # A map from config classes to tuples of processors (tokenizer, feature extractor, processor) classes
    processor_type_map = {c: get_processor_types_from_config_class(c) for c in config_classes}
    to_create = {}
    for c in config_classes:
        processors = processor_type_map[c]
        models = get_architectures_from_config_class(c, pytorch_arch_mappings, models_to_skip)
        tf_models = get_architectures_from_config_class(c, tensorflow_arch_mappings, models_to_skip)
        if len(models) + len(tf_models) > 0:
            to_create[c] = {"processor": processors, "pytorch": models, "tensorflow": tf_models}
    results = {}
    if num_workers <= 1:
        for c, models_to_create in list(to_create.items()):
            print(f"Create models for {c.__name__} ...")
            result = build(c, models_to_create, output_dir=os.path.join(output_path, c.model_type))
            results[c.__name__] = result
            print("=" * 40)
    else:
        all_build_args = []
        for c, models_to_create in list(to_create.items()):
            all_build_args.append((c, models_to_create, os.path.join(output_path, c.model_type)))
        with multiprocessing.Pool() as pool:
            results = pool.starmap(build, all_build_args)
            # (fixed variable typo: was `buid_args`)
            results = {build_args[0].__name__: result for build_args, result in zip(all_build_args, results)}
    if upload:
        if organization is None:
            raise ValueError("The argument `organization` could not be `None`. No model is uploaded")
        to_upload = []
        for model_type in os.listdir(output_path):
            # This is the directory containing the reports
            if model_type == "reports":
                continue
            for arch in os.listdir(os.path.join(output_path, model_type)):
                if arch == "processors":
                    continue
                to_upload.append(os.path.join(output_path, model_type, arch))
        to_upload = sorted(to_upload)
        upload_results = {}
        if len(to_upload) > 0:
            for model_dir in to_upload:
                try:
                    upload_model(model_dir, organization, token)
                except Exception as e:
                    # Record the failure and keep uploading the remaining models.
                    error = f"Failed to upload {model_dir}. {e.__class__.__name__}: {e}"
                    logger.error(error)
                    upload_results[model_dir] = error
            with open(os.path.join(report_path, "failed_uploads.json"), "w") as fp:
                json.dump(upload_results, fp, indent=4)
    # Build the tiny model summary file. The `tokenizer_classes` and `processor_classes` could be both empty lists.
    # When using the items in this file to update the file `tests/utils/tiny_model_summary.json`, the model
    # architectures with `tokenizer_classes` and `processor_classes` being both empty should **NOT** be added to
    # `tests/utils/tiny_model_summary.json`.
    tiny_model_summary = build_tiny_model_summary(results, organization=organization, token=token)
    with open(os.path.join(report_path, "tiny_model_summary.json"), "w") as fp:
        json.dump(tiny_model_summary, fp, indent=4)
    with open(os.path.join(report_path, "tiny_model_creation_report.json"), "w") as fp:
        json.dump(results, fp, indent=4)
    # Build the warning/failure report (json format): same format as the complete `results` except this contains only
    # warnings or errors.
    failed_results = build_failed_report(results)
    with open(os.path.join(report_path, "failed_report.json"), "w") as fp:
        json.dump(failed_results, fp, indent=4)
    simple_report, failed_report = build_simple_report(results)
    # The simplified report: a .txt file with each line of format:
    # {model architecture name}: {OK or error message}
    with open(os.path.join(report_path, "simple_report.txt"), "w") as fp:
        fp.write(simple_report)
    # The simplified failure report: same above except this only contains line with errors
    with open(os.path.join(report_path, "simple_failed_report.txt"), "w") as fp:
        fp.write(failed_report)
    update_tiny_model_summary_file(report_path=os.path.join(output_path, "reports"))
if __name__ == "__main__":
    # This has to be `spawn` to avoid hanging forever!
    multiprocessing.set_start_method("spawn")
    def list_str(values):
        # Parse a comma-separated CLI value into a list of strings.
        return values.split(",")
    parser = argparse.ArgumentParser()
    parser.add_argument("--all", action="store_true", help="Will create all tiny models.")
    parser.add_argument(
        "--no_check",
        action="store_true",
        help="If set, will not check the validity of architectures. Use with caution.",
    )
    parser.add_argument(
        "-m",
        "--model_types",
        type=list_str,
        help="Comma-separated list of model type(s) from which the tiny models will be created.",
    )
    parser.add_argument(
        "--models_to_skip",
        type=list_str,
        help=(
            "Comma-separated list of model class names(s) from which the tiny models won't be created.\nThis is usually"
            "the list of model classes that have their tiny versions already uploaded to the Hub."
        ),
    )
    parser.add_argument("--upload", action="store_true", help="If to upload the created tiny models to the Hub.")
    parser.add_argument(
        "--organization",
        default=None,
        type=str,
        help="The organization on the Hub to which the tiny models will be uploaded.",
    )
    parser.add_argument(
        "--token", default=None, type=str, help="A valid authentication token for HuggingFace Hub with write access."
    )
    parser.add_argument("output_path", type=Path, help="Path indicating where to store generated model.")
    parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.")
    args = parser.parse_args()
    # `--all` and `--model_types` are mutually-required alternatives: at least one must be given.
    if not args.all and not args.model_types:
        raise ValueError("Please provide at least one model type or pass `--all` to export all architectures.")
    create_tiny_models(
        args.output_path,
        args.all,
        args.model_types,
        args.models_to_skip,
        args.no_check,
        args.upload,
        args.organization,
        args.token,
        args.num_workers,
    )
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# All of these paths are relative to the repository root, from which this script is expected to be run.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
# Mapping for files that are full copies of others (keys are copies, values the file to keep them up to date with)
FULL_COPIES = {
    "examples/tensorflow/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py",
    "examples/flax/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py",
}
LOCALIZED_READMES = {
# If the introduction or the conclusion of the list change, the prompts may need to be updated.
"README.md": {
"start_prompt": "🤗 Transformers currently provides the following architectures",
"end_prompt": "1. Want to contribute a new model?",
"format_model_list": (
"**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by"
" {paper_authors}.{supplements}"
),
},
"README_zh-hans.md": {
"start_prompt": "🤗 Transformers 目前支持如下的架构",
"end_prompt": "1. 想要贡献新的模型?",
"format_model_list": (
"**[{title}]({model_link})** (来自 {paper_affiliations}) 伴随论文 {paper_title_link} 由 {paper_authors}"
" 发布。{supplements}"
),
},
"README_zh-hant.md": {
"start_prompt": "🤗 Transformers 目前支援以下的架構",
"end_prompt": "1. 想要貢獻新的模型?",
"format_model_list": (
"**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by"
" {paper_authors}.{supplements}"
),
},
"README_ko.md": {
"start_prompt": "🤗 Transformers는 다음 모델들을 제공합니다",
"end_prompt": "1. 새로운 모델을 올리고 싶나요?",
"format_model_list": (
"**[{title}]({model_link})** ({paper_affiliations} 에서 제공)은 {paper_authors}.{supplements}의"
" {paper_title_link}논문과 함께 발표했습니다."
),
},
"README_es.md": {
"start_prompt": "🤗 Transformers actualmente proporciona las siguientes arquitecturas",
"end_prompt": "1. ¿Quieres aportar un nuevo modelo?",
"format_model_list": (
"**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by"
" {paper_authors}.{supplements}"
),
},
"README_ja.md": {
"start_prompt": "🤗Transformersは現在、以下のアーキテクチャを提供しています",
"end_prompt": "1. 新しいモデルを投稿したいですか?",
"format_model_list": (
"**[{title}]({model_link})** ({paper_affiliations} から) {paper_authors}.{supplements} から公開された研究論文"
" {paper_title_link}"
),
},
"README_hd.md": {
"start_prompt": "🤗 ट्रांसफॉर्मर वर्तमान में निम्नलिखित आर्किटेक्चर का समर्थन करते हैं",
"end_prompt": "1. एक नए मॉडल में योगदान देना चाहते हैं?",
"format_model_list": (
"**[{title}]({model_link})** ({paper_affiliations} से) {paper_authors}.{supplements} द्वारा"
"अनुसंधान पत्र {paper_title_link} के साथ जारी किया गया"
),
},
}
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def _should_continue(line, indent):
    """
    Return whether `line` still belongs to the block whose body is indented by `indent`.

    A line belongs to the block when it keeps (at least) that indentation, when it is (nearly) empty, or when it is
    the closing parenthesis of a multi-line signature, optionally followed by a return annotation.
    """
    if line.startswith(indent) or len(line) <= 1:
        return True
    # Closing parenthesis of a signature, e.g. `) -> SomeType:` or `):`.
    return re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_transformers(object_name):
    """Find and return the source code of `object_name` (e.g. `models.bert.modeling_bert.BertModel`)."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(TRANSFORMERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(
            f"`object_name` should begin with the name of a module of transformers but got {object_name}."
        )
    with open(os.path.join(TRANSFORMERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    # Walk the remaining (possibly nested) names; each nested name lives one indentation level deeper.
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# Matches `# Copied from transformers.xxx.yyy` comments, capturing the indent, the source object name and an
# optional trailing replacement pattern (e.g. `with Bert->Roberta`).
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+transformers\.(\S+\.\S+)\s*($|\S.*$)")
# Matches one `a->b` replacement, optionally followed by an option such as `all-casing`.
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
# Matches `<FILL ...>` placeholders left in localized READMEs for translators to complete.
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the indentation (leading whitespace) of the first non-empty line in `code`."""
    for code_line in code.split("\n"):
        if len(code_line) == 0:
            continue
        return re.search(r"^(\s*)\S", code_line).groups()[0]
    # Only empty lines (or an empty string): no indentation to report.
    return ""
def blackify(code):
    """
    Applies the black part of our `make style` command to `code`.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        # Wrap indented code in a dummy class so black accepts it as valid top-level code.
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    # Strip the dummy class header again (if one was added) before returning.
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.

    Args:
        filename (`str`): The file to check.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the inconsistent copies instead of only reporting them.

    Returns:
        A list of `[object_name, line_index]` pairs, one per inconsistent copy found.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_transformers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            # Locate the first differing line to make the report actionable.
            diff_index = start_index + 1
            for observed_line, theoretical_line in zip(observed_code.split("\n"), theoretical_code.split("\n")):
                if observed_line != theoretical_line:
                    break
                diff_index += 1
            diffs.append([object_name, diff_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified. (Fix: the message previously did not include the filename.)
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """
    Check every Python file in the library for `# Copied from` consistency (and the model list in the README).

    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite inconsistent copies instead of raising.
    """
    all_files = glob.glob(os.path.join(TRANSFORMERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        # Fix: include the offending filename in each reported inconsistency.
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
    check_model_list_copy(overwrite=overwrite)
def check_full_copies(overwrite: bool = False):
    """
    Check that each target file listed in `FULL_COPIES` is byte-identical to its source file. Raises (or rewrites the
    target, when `overwrite` is `True`) otherwise.
    """
    diffs = []
    for target, source in FULL_COPIES.items():
        with open(source, "r", encoding="utf-8") as reader:
            source_code = reader.read()
        with open(target, "r", encoding="utf-8") as reader:
            target_code = reader.read()
        if source_code == target_code:
            continue
        if overwrite:
            with open(target, "w", encoding="utf-8") as writer:
                print(f"Replacing the content of {target} by the one of {source}.")
                writer.write(source_code)
        else:
            diffs.append(f"- {target}: copy does not match {source}.")
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
def get_model_list(filename, start_prompt, end_prompt):
    """Extracts the model list from the README."""
    with open(os.path.join(REPO_PATH, filename), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    result = []
    current_line = ""
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        if lines[end_index].startswith("1."):
            # A new numbered entry begins: flush the previous one (if non-empty) first.
            if len(current_line) > 1:
                result.append(current_line)
            current_line = lines[end_index]
        elif len(lines[end_index]) > 1:
            # Continuation line: merge it into the current entry (drop the trailing newline, join with a space).
            current_line = f"{current_line[:-1]} {lines[end_index].lstrip()}"
        end_index += 1
    # Don't forget the last entry.
    if len(current_line) > 1:
        result.append(current_line)
    return "".join(result)
def convert_to_localized_md(model_list, localized_model_list, format_str):
    """Convert `model_list` to each localized README."""
    def _rep(match):
        # Re-render one English model entry with the localized `format_str`.
        title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups()
        return format_str.format(
            title=title,
            model_link=model_link,
            paper_affiliations=paper_affiliations,
            paper_title_link=paper_title_link,
            paper_authors=paper_authors,
            supplements=" " + supplements.strip() if len(supplements) != 0 else "",
        )
    # This regex captures metadata from an English model description, including model title, model link,
    # affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for example).
    _re_capture_meta = re.compile(
        r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$"
    )
    # This regex is used to synchronize link.
    _re_capture_title_link = re.compile(r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*")
    if len(localized_model_list) == 0:
        localized_model_index = {}
    else:
        try:
            # Map each model title to its full line in the localized README.
            localized_model_index = {
                re.search(r"\*\*\[([^\]]*)", line).groups()[0]: line
                for line in localized_model_list.strip().split("\n")
            }
        except AttributeError:
            raise AttributeError("A model name in localized READMEs cannot be recognized.")
    model_keys = [re.search(r"\*\*\[([^\]]*)", line).groups()[0] for line in model_list.strip().split("\n")]
    # We exclude keys in localized README not in the main one.
    readmes_match = not any(k not in model_keys for k in localized_model_index)
    localized_model_index = {k: v for k, v in localized_model_index.items() if k in model_keys}
    for model in model_list.strip().split("\n"):
        title, model_link = _re_capture_title_link.search(model).groups()
        if title not in localized_model_index:
            readmes_match = False
            # Add an anchor white space behind a model description string for regex.
            # If metadata cannot be captured, the English version will be directly copied.
            localized_model_index[title] = _re_capture_meta.sub(_rep, model + " ")
        elif _re_fill_pattern.search(localized_model_index[title]) is not None:
            # The localized entry still contains `<FILL ...>` placeholders: regenerate it from the English entry.
            update = _re_capture_meta.sub(_rep, model + " ")
            if update != localized_model_index[title]:
                readmes_match = False
                localized_model_index[title] = update
        else:
            # Synchronize link
            localized_model_index[title] = _re_capture_title_link.sub(
                f"**[{title}]({model_link})**", localized_model_index[title], count=1
            )
    # Sort entries alphabetically (case-insensitive) by model title.
    sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower())
    return readmes_match, "\n".join((x[1] for x in sorted_index)) + "\n"
def convert_readme_to_index(model_list):
    """Turn absolute documentation links in `model_list` into relative ones suitable for `docs/.../index.md`."""
    # The `main/` variant must be stripped first, otherwise the shorter prefix would leave `main/` behind.
    for doc_prefix in (
        "https://huggingface.co/docs/transformers/main/",
        "https://huggingface.co/docs/transformers/",
    ):
        model_list = model_list.replace(doc_prefix, "")
    return model_list
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, with the
    surrounding empty lines removed.

    Returns the extracted text, the index of its first line, the index right after its last line, and the full list
    of lines of the file.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as reader:
        lines = reader.readlines()

    def _first_line_starting_with(prefix, start):
        # Index of the first line at or after `start` beginning with `prefix`.
        index = start
        while not lines[index].startswith(prefix):
            index += 1
        return index

    start_index = _first_line_starting_with(start_prompt, 0) + 1
    end_index = _first_line_starting_with(end_prompt, start_index) - 1
    # Strip empty lines on both sides of the extracted block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    return "".join(lines[start_index : end_index + 1]), start_index, end_index + 1, lines
def check_model_list_copy(overwrite=False, max_per_line=119):
    """
    Check the model lists in the README and index.rst are consistent and maybe `overwrite`.

    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to fix the inconsistencies instead of raising.
        max_per_line (`int`, *optional*, defaults to 119):
            NOTE: kept for backward compatibility but currently unused in this function.
    """
    # Fix potential doc links in the README
    with open(os.path.join(REPO_PATH, "README.md"), "r", encoding="utf-8", newline="\n") as f:
        readme = f.read()
    new_readme = readme.replace("https://huggingface.co/transformers", "https://huggingface.co/docs/transformers")
    new_readme = new_readme.replace(
        "https://huggingface.co/docs/main/transformers", "https://huggingface.co/docs/transformers/main"
    )
    if new_readme != readme:
        if overwrite:
            with open(os.path.join(REPO_PATH, "README.md"), "w", encoding="utf-8", newline="\n") as f:
                f.write(new_readme)
        else:
            raise ValueError(
                "The main README contains wrong links to the documentation of Transformers. Run `make fix-copies` to "
                "automatically fix them."
            )
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    index_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This list is updated automatically from the README",
        end_prompt="### Supported frameworks",
    )
    md_list = get_model_list(
        filename="README.md",
        start_prompt=LOCALIZED_READMES["README.md"]["start_prompt"],
        end_prompt=LOCALIZED_READMES["README.md"]["end_prompt"],
    )
    # Build the localized model list for every translated README.
    converted_md_lists = []
    for filename, value in LOCALIZED_READMES.items():
        _start_prompt = value["start_prompt"]
        _end_prompt = value["end_prompt"]
        _format_model_list = value["format_model_list"]
        localized_md_list = get_model_list(filename, _start_prompt, _end_prompt)
        readmes_match, converted_md_list = convert_to_localized_md(md_list, localized_md_list, _format_model_list)
        converted_md_lists.append((filename, readmes_match, converted_md_list, _start_prompt, _end_prompt))
    # The doc index must mirror the main README model list (with relative links).
    converted_md_list = convert_readme_to_index(md_list)
    if converted_md_list != index_list:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [converted_md_list] + lines[end_index:])
        else:
            raise ValueError(
                "The model list in the README changed and the list in `index.md` has not been updated. Run "
                "`make fix-copies` to fix this."
            )
    for converted_md_list in converted_md_lists:
        filename, readmes_match, converted_md, _start_prompt, _end_prompt = converted_md_list
        if filename == "README.md":
            continue
        if overwrite:
            _, start_index, end_index, lines = _find_text_in_file(
                filename=os.path.join(REPO_PATH, filename), start_prompt=_start_prompt, end_prompt=_end_prompt
            )
            with open(os.path.join(REPO_PATH, filename), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [converted_md] + lines[end_index:])
        elif not readmes_match:
            # Fix: include the offending localized README's filename in the error message.
            raise ValueError(
                f"The model list in the README changed and the list in `{filename}` has not been updated. Run "
                "`make fix-copies` to fix this."
            )
# Maps model names as they appear in the library's `MODEL_NAMES_MAPPING` to the (different) display names used for
# them in the main README.
SPECIAL_MODEL_NAMES = {
    "Bert Generation": "BERT For Sequence Generation",
    "BigBird": "BigBird-RoBERTa",
    "Data2VecAudio": "Data2Vec",
    "Data2VecText": "Data2Vec",
    "Data2VecVision": "Data2Vec",
    "DonutSwin": "Swin Transformer",
    "Marian": "MarianMT",
    "MaskFormerSwin": "Swin Transformer",
    "OpenAI GPT-2": "GPT-2",
    "OpenAI GPT": "GPT",
    "Perceiver": "Perceiver IO",
    "SAM": "Segment Anything",
    "ViT": "Vision Transformer (ViT)",
}
# Update this list with the models that shouldn't be in the README. This only concerns modular models or those which
# do not have an associated paper.
MODELS_NOT_IN_README = [
    "BertJapanese",
    "Encoder decoder",
    "FairSeq Machine-Translation",
    "HerBERT",
    "RetriBERT",
    "Speech Encoder decoder",
    "Speech2Text",
    "Speech2Text2",
    "TimmBackbone",
    "Vision Encoder decoder",
    "VisionTextDualEncoder",
]
# Template used by `check_readme` to generate a README entry for a missing model; the `<FILL ...>` placeholders
# (matched by `_re_fill_pattern`) are meant to be completed manually afterwards.
README_TEMPLATE = (
    "1. **[{model_name}](https://huggingface.co/docs/main/transformers/model_doc/{model_type})** (from "
    "<FILL INSTITUTION>) released with the paper [<FILL PAPER TITLE>](<FILL ARKIV LINK>) by <FILL AUTHORS>."
)
def check_readme(overwrite=False):
    """Check the main README contains an entry for every model (and maybe `overwrite` to add the missing ones)."""
    info = LOCALIZED_READMES["README.md"]
    models, start_index, end_index, lines = _find_text_in_file(
        os.path.join(REPO_PATH, "README.md"),
        info["start_prompt"],
        info["end_prompt"],
    )
    # Titles of the model entries currently present in the README.
    models_in_readme = [re.search(r"\*\*\[([^\]]*)", line).groups()[0] for line in models.strip().split("\n")]
    model_names_mapping = transformers_module.models.auto.configuration_auto.MODEL_NAMES_MAPPING
    # Models known to the library but absent from the README (taking display-name aliases into account).
    absents = [
        (key, name)
        for key, name in model_names_mapping.items()
        if SPECIAL_MODEL_NAMES.get(name, name) not in models_in_readme
    ]
    # Remove exceptions
    absents = [(key, name) for key, name in absents if name not in MODELS_NOT_IN_README]
    if len(absents) > 0 and not overwrite:
        print(absents)
        raise ValueError(
            "The main README doesn't contain all models, run `make fix-copies` to fill it with the missing model(s)"
            " then complete the generated entries.\nIf the model is not supposed to be in the main README, add it to"
            " the list `MODELS_NOT_IN_README` in utils/check_copies.py.\nIf it has a different name in the repo than"
            " in the README, map the correspondence in `SPECIAL_MODEL_NAMES` in utils/check_copies.py."
        )
    new_models = [README_TEMPLATE.format(model_name=name, model_type=key) for key, name in absents]
    all_models = models.strip().split("\n") + new_models
    # Entries are kept sorted alphabetically (case-insensitive) by model title.
    all_models = sorted(all_models, key=lambda x: re.search(r"\*\*\[([^\]]*)", x).groups()[0].lower())
    all_models = "\n".join(all_models) + "\n"
    if all_models != models:
        if overwrite:
            print("Fixing the main README.")
            with open(os.path.join(REPO_PATH, "README.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [all_models] + lines[end_index:])
        else:
            raise ValueError("The main README model list is not properly sorted. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    # Run all consistency checks; with `--fix_and_overwrite` they rewrite the offending files instead of raising.
    check_readme(args.fix_and_overwrite)
    check_copies(args.fix_and_overwrite)
    check_full_copies(args.fix_and_overwrite)
# File: utils/check_tf_ops.py
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model): they are excluded from the
# ONNX-compatibility check performed in `onnx_compliancy` below.
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """
    Check that all the ops used in a TensorFlow saved model have an ONNX equivalent for the given opset.

    Args:
        saved_model_path: Path of the saved model to check (the `.pb` file).
        strict: If `True`, raise on incompatible ops instead of just printing them.
        opset: The ONNX opset version to check against.
    """
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    # Opsets are cumulative: collect the ops of every opset up to (and including) the requested one.
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        # Fix: `incompatible_ops` is a list, so it must be joined before being concatenated to the message
        # (the previous `str + list` concatenation raised a `TypeError` instead of the intended `Exception`).
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()
    # Only the ONNX framework is supported for now.
    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
# File: utils/get_github_job_time.py
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    started_at = job["started_at"]
    completed_at = job["completed_at"]
    # Duration in minutes (rounded) between the two timestamps parsed by `dateutil`.
    elapsed = date_parser.parse(completed_at) - date_parser.parse(started_at)
    return {
        "started_at": started_at,
        "completed_at": completed_at,
        "duration": round(elapsed.total_seconds() / 60.0),
    }
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The endpoint is paginated (100 jobs per page): fetch the remaining pages, if any.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        # Best effort: log the failure and return an empty mapping instead of crashing the caller.
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
if __name__ == "__main__":
    r"""
    Example:
    python get_github_job_time.py --workflow_run_id 2945609517
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs by duration (longest first) before printing.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
# File: utils/check_inits.py
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import re
from pathlib import Path
# Path to the transformers sources, relative to the repo root (this script is meant to be run from there).
PATH_TO_TRANSFORMERS = "src/transformers"
# The regexes below are used by `parse_init` to recognize the different line shapes appearing in the library inits.
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init, or return `None` for any other line."""
    if re.search(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)", line) is None:
        return None
    # `findall` yields `(backend, "")` tuples because of the trailing empty group in the pattern.
    found_backends = sorted(match[0] for match in re.findall(r"is\_([a-z_]*)_available()", line))
    return "_and_".join(found_backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.

    Returns `None` when the init is a traditional one that does not define an `_import_structure`, otherwise a tuple
    of two dictionaries mapping backend names (`"none"` for objects without a specific backend) to lists of object
    names.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            # `line[9:-3]` strips the 8-space indent plus the opening quote, and the closing `",\n`.
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    # Same as above, one indentation level deeper.
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            # Continuation line of a multi-line import: strip the indent and the trailing `,\n`.
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Compare the objects collected from the `_import_structure` half of an init with those of its `TYPE_CHECKING`
    half, returning a list of error messages (empty when both halves agree).
    """
    def find_duplicates(seq):
        # Items appearing more than once in `seq`.
        return [item for item, count in collections.Counter(seq).items() if count > 1]
    # Both halves must declare exactly the same backends, in the same order.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for backend_key, imported_objects in import_dict_objects.items():
        type_hinted_objects = type_hint_objects[backend_key]
        duplicate_imports = find_duplicates(imported_objects)
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hinted_objects)
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(imported_objects)) != sorted(set(type_hinted_objects)):
            name = "base imports" if backend_key == "none" else f"{backend_key} backend"
            errors.append(f"Differences for {name}:")
            errors.extend(
                f"  {obj} in TYPE_HINT but not in _import_structure."
                for obj in type_hinted_objects
                if obj not in imported_objects
            )
            errors.extend(
                f"  {obj} in _import_structure but not in TYPE_HINT."
                for obj in imported_objects
                if obj not in type_hinted_objects
            )
    return errors
def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" not in files:
            continue
        fname = os.path.join(root, "__init__.py")
        objects = parse_init(fname)
        # `parse_init` returns `None` for traditional inits, which have nothing to check.
        if objects is None:
            continue
        errors = analyze_results(*objects)
        if len(errors) > 0:
            errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
            failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules (dotted names relative to `PATH_TO_TRANSFORMERS`).
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        # Prune private modules with an in-place slice assignment so `os.walk` does not recurse into them.
        # Fix: the previous implementation removed items from `directories` while iterating over it, which skips
        # the entry immediately following each removed one.
        directories[:] = [folder for folder in directories if not folder.startswith("_")]
        for folder in directories:
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules (nested ones are covered by their parent folder).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are intentionally not registered in the main init of Transformers.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """
    Check that every submodule of Transformers is registered in the keys of `_import_structure` in the main init
    (raises a `ValueError` listing the missing ones otherwise).
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        # Fix: "registed" -> "registered" in the user-facing error message.
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Run both consistency checks; each raises ValueError on failure.
    check_all_inits()
    check_submodules()
| 12,878 | 41.088235 | 117 | py |
transformers | transformers-main/utils/check_model_tester.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from get_test_info import get_tester_classes
if __name__ == "__main__":
    # Collect human-readable descriptions of tester configs that are too large for fast testing.
    failures = []

    pattern = os.path.join("tests", "models", "**", "test_modeling_*.py")
    test_files = glob.glob(pattern)
    # TODO: deal with TF/Flax too
    test_files = [
        x
        for x in test_files
        # Match on the file name: `x` is a full path, so `x.startswith("test_modeling_tf_")`
        # could never be True and the filter previously had no effect.
        if not os.path.basename(x).startswith(("test_modeling_tf_", "test_modeling_flax_"))
    ]

    for test_file in test_files:
        tester_classes = get_tester_classes(test_file)
        for tester_class in tester_classes:
            # A few tester classes don't have `parent` parameter in `__init__`.
            # TODO: deal this better
            try:
                tester = tester_class(parent=None)
            except Exception:
                continue

            if hasattr(tester, "get_config"):
                config = tester.get_config()
                for k, v in config.to_dict().items():
                    if isinstance(v, int):
                        target = None
                        if k in ["vocab_size"]:
                            target = 100
                        elif k in ["max_position_embeddings"]:
                            target = 128
                        elif k in ["hidden_size", "d_model"]:
                            target = 40
                        # Fixed: this used to compare the string `k` to the whole list with `==`,
                        # which was always False, so the layer-count keys were never checked.
                        elif k in ["num_layers", "num_hidden_layers", "num_encoder_layers", "num_decoder_layers"]:
                            target = 5
                        if target is not None and v > target:
                            failures.append(
                                f"{tester_class.__name__} will produce a `config` of type `{config.__class__.__name__}`"
                                f' with config["{k}"] = {v} which is too large for testing! Set its value to be smaller'
                                f" than {target}."
                            )

    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
| 2,598 | 39.609375 | 120 | py |
transformers | transformers-main/utils/sort_auto_mappings.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    """
    Sort the `OrderedDict` mappings found in `fname` alphabetically by their identifier key.

    Args:
        fname: Path of the auto-module file to process.
        overwrite: If True, rewrite the file in place; if False, only report whether a rewrite is needed.

    Returns:
        True if `overwrite` is False and the file would be changed by sorting; None otherwise.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries of the mapping are indented 8 spaces deeper than the assignment line.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            # Gather each mapping entry (one or several lines) as a single string block.
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    # Multi-line entry: consume lines until the closing parenthesis at the same indent.
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        # Check-only mode: signal that the file is not sorted.
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """
    Sort the mappings of every Python file in the auto module. With `overwrite=True`, fix the files in
    place; otherwise raise an error listing the files whose mappings are out of order.
    """
    auto_files = [
        os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")
    ]
    needs_sorting = [sort_auto_mapping(auto_file, overwrite=overwrite) for auto_file in auto_files]

    if not overwrite and any(needs_sorting):
        failures = [f for f, d in zip(auto_files, needs_sorting) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    # With --check_only, only report unsorted mappings; otherwise rewrite the files in place.
    sort_all_auto_mappings(not args.check_only)
| 3,313 | 35.822222 | 119 | py |
transformers | transformers-main/utils/past_ci_versions.py | import argparse
import os
past_versions_testing = {
"pytorch": {
"1.13": {
"torch": "1.13.1",
"torchvision": "0.14.1",
"torchaudio": "0.13.1",
"python": 3.9,
"cuda": "cu116",
"install": (
"python3 -m pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1"
" --extra-index-url https://download.pytorch.org/whl/cu116"
),
"base_image": "nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04",
},
"1.12": {
"torch": "1.12.1",
"torchvision": "0.13.1",
"torchaudio": "0.12.1",
"python": 3.9,
"cuda": "cu113",
"install": (
"python3 -m pip install --no-cache-dir -U torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1"
" --extra-index-url https://download.pytorch.org/whl/cu113"
),
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"1.11": {
"torch": "1.11.0",
"torchvision": "0.12.0",
"torchaudio": "0.11.0",
"python": 3.9,
"cuda": "cu113",
"install": (
"python3 -m pip install --no-cache-dir -U torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0"
" --extra-index-url https://download.pytorch.org/whl/cu113"
),
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"1.10": {
"torch": "1.10.2",
"torchvision": "0.11.3",
"torchaudio": "0.10.2",
"python": 3.9,
"cuda": "cu113",
"install": (
"python3 -m pip install --no-cache-dir -U torch==1.10.2 torchvision==0.11.3 torchaudio==0.10.2"
" --extra-index-url https://download.pytorch.org/whl/cu113"
),
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
# torchaudio < 0.10 has no CUDA-enabled binary distributions
"1.9": {
"torch": "1.9.1",
"torchvision": "0.10.1",
"torchaudio": "0.9.1",
"python": 3.9,
"cuda": "cu111",
"install": (
"python3 -m pip install --no-cache-dir -U torch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1"
" --extra-index-url https://download.pytorch.org/whl/cu111"
),
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
},
"tensorflow": {
"2.11": {
"tensorflow": "2.11.1",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.11.1",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"2.10": {
"tensorflow": "2.10.1",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.10.1",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"2.9": {
"tensorflow": "2.9.3",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.9.3",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"2.8": {
"tensorflow": "2.8.2",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.8.2",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"2.7": {
"tensorflow": "2.7.3",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.7.3",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"2.6": {
"tensorflow": "2.6.5",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.6.5",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
"2.5": {
"tensorflow": "2.5.3",
"install": "python3 -m pip install --no-cache-dir -U tensorflow==2.5.3",
"base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
},
},
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Choose the framework and version to install")
    parser.add_argument(
        "--framework", help="The framework to install. Should be `torch` or `tensorflow`", type=str, required=True
    )
    parser.add_argument("--version", help="The version of the framework to install.", type=str, required=True)
    args = parser.parse_args()
    # Raises KeyError if the framework/version pair is not in the table above.
    info = past_versions_testing[args.framework][args.version]
    # Persist the install command in ~/.profile so later CI steps can source it; also echo it to the log.
    os.system(f'echo "export INSTALL_CMD=\'{info["install"]}\'" >> ~/.profile')
    print(f'echo "export INSTALL_CMD=\'{info["install"]}\'" >> ~/.profile')
    # CUDA tag only applies to the PyTorch entries; TensorFlow exports an empty value.
    cuda = ""
    if args.framework == "pytorch":
        cuda = info["cuda"]
    os.system(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
    print(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
| 4,995 | 38.338583 | 114 | py |
transformers | transformers-main/utils/release.py | # coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
"init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
"setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
"doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex/template pair registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as source:
        contents = source.read()

    regex, template = REPLACE_PATTERNS[pattern]
    updated = regex.sub(template.replace("VERSION", version), contents)

    with open(fname, "w", encoding="utf-8", newline="\n") as destination:
        destination.write(updated)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for example_dir, subdirs, filenames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        for skipped in ("research_projects", "legacy"):
            if skipped in subdirs:
                subdirs.remove(skipped)
        for filename in filenames:
            if filename.endswith(".py"):
                update_version_in_file(os.path.join(example_dir, filename), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern_name, target_file in REPLACE_FILES.items():
        update_version_in_file(target_file, version, pattern_name)
    # Examples pin a minimum version; a patch release does not bump it.
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        # Each model entry is a numbered bullet starting with "1.".
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the main `__init__.py` and return it as a parsed version object."""
    with open(REPLACE_FILES["init"], "r") as init_file:
        init_code = init_file.read()
    raw_version = REPLACE_PATTERNS["init"][0].search(init_code).groups()[0]
    return packaging.version.parse(raw_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps.

    Computes a default version (base version if on a dev release, bumped micro for a patch, bumped minor
    otherwise), asks the user to confirm it, then updates the version everywhere and cleans the README.
    """
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        # The README cleanup is only needed for real releases, not patches.
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps.

    Proposes the next dev version (minor + 1, `.dev0` suffix), asks the user to confirm it, then updates
    the version everywhere and cleans the README.
    """
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    # Default flow is pre-release; --post_release switches to the post-release bump (no-op for patches).
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 6,207 | 38.044025 | 115 | py |
transformers | transformers-main/utils/get_previous_daily_ci.py | import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI.

    This only selects the runs triggered by the `schedule` event on the `main` branch.

    Args:
        token: GitHub API token, or None for unauthenticated requests.
        num_runs: Maximum number of runs to return (most recent first, per the GitHub API).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI, or None if none completed."""
    for run in get_daily_ci_runs(token):
        if run["status"] == "completed":
            return run["id"]
    return None
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of last completed workflow run id of the scheduled (daily) CI.

    Downloads each artifact in `artifact_names` that exists for that run into `output_dir`
    (as `<name>.zip`). Does nothing if no completed run is found.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): the keyword is spelled `worflow_run_id` — presumably it matches a misspelled
        # parameter in `get_ci_error_statistics.get_artifacts_links`; confirm there before renaming.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run id of the scheduled (daily) CI.

    Returns a mapping `{artifact_name: {member_filename: decoded_text}}` for every requested
    artifact whose zip was downloaded.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if not os.path.isfile(zip_path):
            continue
        file_contents = {}
        with zipfile.ZipFile(zip_path) as archive:
            for member in archive.namelist():
                if os.path.isdir(member):
                    continue
                # read the file
                with archive.open(member) as handle:
                    file_contents[member] = handle.read().decode("UTF-8")
        results[artifact_name] = file_contents
    return results
| 2,763 | 37.929577 | 110 | py |
transformers | transformers-main/utils/get_test_info.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
r"""
The argument `test_file` in this file refers to a model test file. This should be a string of the from
`tests/models/*/test_modeling_*.py`.
"""
def get_module_path(test_file):
    """Return the dotted module path of a model test file.

    Args:
        test_file: a path of the form `tests/models/*/test_modeling_*.py` (using the OS path separator).

    Returns:
        The corresponding module path, e.g. `tests.models.bert.test_modeling_bert`.

    Raises:
        ValueError: if `test_file` is not under `tests/models/`, is not a `.py` file, or is not named
            `test_modeling_*.py`.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    # Check the full `.py` suffix: the previous `endswith("py")` also accepted names like `test_modeling_xpy`.
    if not test_fn.endswith(".py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the module corresponding to a model test file."""
    return importlib.import_module(get_module_path(test_file))
def get_tester_classes(test_file):
    """Get all classes in a model test file whose names ends with `ModelTester`."""
    test_module = get_test_module(test_file)
    tester_classes = [getattr(test_module, name) for name in dir(test_module) if name.endswith("ModelTester")]
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty.

    These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the
    classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of
    `unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses).
    """
    test_module = get_test_module(test_file)
    test_classes = []
    for name in dir(test_module):
        candidate = getattr(test_module, name)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        if len(getattr(candidate, "all_model_classes", [])) > 0:
            test_classes.append(candidate)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    model_classes = set()
    for test_class in get_test_classes(test_file):
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class, or None when no tester is set."""
    instance = test_class()
    if hasattr(instance, "setUp"):
        instance.setUp()
    tester = getattr(instance, "model_tester", None)
    # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
    return None if tester is None else tester.__class__
def get_test_classes_for_model(test_file, model_class):
    """Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`."""
    matching = [tc for tc in get_test_classes(test_file) if model_class in tc.all_model_classes]
    # sort with class names
    return sorted(matching, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    candidates = (
        get_model_tester_from_test_class(tc) for tc in get_test_classes_for_model(test_file, model_class)
    )
    # Drop test classes without a tester, and sort with class names.
    return sorted((tester for tester in candidates if tester is not None), key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Get a mapping from [test] classes to model tester classes in `test_file`.

    This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`.
    """
    return {tc: get_model_tester_from_test_class(tc) for tc in get_test_classes(test_file)}
def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    return {
        model_class: get_test_classes_for_model(test_file, model_class)
        for model_class in get_model_classes(test_file)
    }
def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    return {
        model_class: get_tester_classes_for_model(test_file, model_class)
        for model_class in get_model_classes(test_file)
    }
def to_json(o):
    """Make the information succinct and easy to read.

    Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when
    displaying the results. Instead, we use class name (`BertForMaskedLM`) for the readability.
    """
    if isinstance(o, type):
        return o.__name__
    if isinstance(o, (list, tuple)):
        return [to_json(element) for element in o]
    if isinstance(o, dict):
        return {to_json(key): to_json(value) for key, value in o.items()}
    # Strings and every other scalar are returned unchanged.
    return o
| 7,048 | 35.905759 | 118 | py |
transformers | transformers-main/utils/download_glue_data.py | """ Script for downloading all GLUE data.
Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
"""
import argparse
import os
import sys
import urllib.request
import zipfile
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {
"CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4",
"SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8",
"MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc",
"QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5",
"STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5",
"MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce",
"SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df",
"QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601",
"RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb",
"WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf",
"diagnostic": "https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D",
}
MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
def download_and_extract(task, data_dir):
    """Download the zip archive for `task`, extract it into `data_dir`, and delete the archive."""
    print(f"Downloading and extracting {task}...")
    # Downloaded into the current working directory, then removed after extraction.
    data_file = f"{task}.zip"
    urllib.request.urlretrieve(TASK2PATH[task], data_file)
    with zipfile.ZipFile(data_file) as zip_ref:
        zip_ref.extractall(data_dir)
    os.remove(data_file)
    print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
    """Build the MRPC train/dev/test TSV files under `data_dir`/MRPC.

    Args:
        data_dir: Root output directory.
        path_to_data: Directory containing `msr_paraphrase_train.txt` and `msr_paraphrase_test.txt`;
            if empty, the raw files are downloaded instead.
    """
    print("Processing MRPC...")
    mrpc_dir = os.path.join(data_dir, "MRPC")
    if not os.path.isdir(mrpc_dir):
        os.mkdir(mrpc_dir)
    if path_to_data:
        mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
        mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
    else:
        print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
        mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
        mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
        urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
        urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
    if not os.path.isfile(mrpc_train_file):
        raise ValueError(f"Train data not found at {mrpc_train_file}")
    if not os.path.isfile(mrpc_test_file):
        raise ValueError(f"Test data not found at {mrpc_test_file}")
    # The dev split is defined by a published list of (id1, id2) pairs.
    urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
    dev_ids = []
    with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
        for row in ids_fh:
            dev_ids.append(row.strip().split("\t"))
    # Split the raw train file into train.tsv / dev.tsv based on the dev id pairs.
    with open(mrpc_train_file, encoding="utf8") as data_fh, open(
        os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8"
    ) as train_fh, open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh:
        header = data_fh.readline()
        train_fh.write(header)
        dev_fh.write(header)
        for row in data_fh:
            label, id1, id2, s1, s2 = row.strip().split("\t")
            if [id1, id2] in dev_ids:
                dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
            else:
                train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
    # Test file gets an index column and drops the (unavailable) labels.
    with open(mrpc_test_file, encoding="utf8") as data_fh, open(
        os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8"
    ) as test_fh:
        header = data_fh.readline()
        test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
        for idx, row in enumerate(data_fh):
            label, id1, id2, s1, s2 = row.strip().split("\t")
            test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
    print("\tCompleted!")
def download_diagnostic(data_dir):
    """Download the diagnostic TSV file into `data_dir`/diagnostic."""
    print("Downloading and extracting diagnostic...")
    if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
        os.mkdir(os.path.join(data_dir, "diagnostic"))
    data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
    urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
    print("\tCompleted!")
    return
def get_tasks(task_names):
    """Parse a comma separated task string into a list of known task names ("all" selects every task)."""
    requested = task_names.split(",")
    if "all" in requested:
        return TASKS
    unknown = [name for name in requested if name not in TASKS]
    if unknown:
        # Report the first unknown task, matching the order they were given.
        raise ValueError(f"Task {unknown[0]} not found!")
    return requested
def main(arguments):
    """CLI entry point: parse arguments and download the requested GLUE tasks."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", help="directory to save data to", type=str, default="glue_data")
    parser.add_argument(
        "--tasks", help="tasks to download data for as a comma separated string", type=str, default="all"
    )
    parser.add_argument(
        "--path_to_mrpc",
        help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt",
        type=str,
        default="",
    )
    args = parser.parse_args(arguments)

    if not os.path.isdir(args.data_dir):
        os.mkdir(args.data_dir)

    # MRPC and the diagnostic set need dedicated handling; everything else is
    # a plain download-and-extract.
    for task in get_tasks(args.tasks):
        if task == "MRPC":
            format_mrpc(args.data_dir, args.path_to_mrpc)
        elif task == "diagnostic":
            download_diagnostic(args.data_dir)
        else:
            download_and_extract(task, args.data_dir)
if __name__ == "__main__":
    # Forward CLI arguments (minus the program name) and propagate the exit status.
    sys.exit(main(sys.argv[1:]))
| 8,285 | 51.443038 | 615 | py |
transformers | transformers-main/utils/extract_warnings.py | import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format).

    `artifact_path` is a zip file, or a plain directory when the module-level
    `from_gh` flag (set in the `__main__` block below) is true and the artifact
    was already extracted by the GitHub download action. Only the
    `warnings.txt` member is read; a warning block is kept when its text
    contains `: <target>: ` for any of the `targets` warning-class names.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulates the indented body lines of one warning into `buffer`;
        # a non-indented line marks the end of the current warning block.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
        # NOTE(review): a warning still sitting in `buffer` at EOF is never
        # flushed; this assumes the file always ends with a non-indented line
        # — confirm against the warnings.txt producer.

    if from_gh:  # artifact is an already-extracted directory
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:  # artifact is a .zip file
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            # Best-effort: a corrupt artifact should not abort the whole scan.
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files under `artifact_dir`.

    When the module-level `from_gh` flag is set, every entry is scanned;
    otherwise only `.zip` files are considered.
    """
    collected = set()
    for entry in os.listdir(artifact_dir):
        if not (entry.endswith(".zip") or from_gh):
            continue
        full_path = os.path.join(artifact_dir, entry)
        collected.update(extract_warnings_from_single_artifact(full_path, targets))
    return collected
if __name__ == "__main__":

    def list_str(values):
        # argparse `type=` helper: split a comma-separated option into a list.
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    # Module-level flag also read by the extraction functions above.
    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 4,692 | 33.762963 | 112 | py |
transformers | transformers-main/utils/notification_service.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import functools
import json
import operator
import os
import re
import sys
import time
from typing import Dict, List, Optional, Union
import requests
from get_ci_error_statistics import get_job_links
from get_previous_daily_ci import get_last_daily_ci_reports
from slack_sdk import WebClient
# Slack client used to post the CI report; the bot token must be provided via
# the environment (raises KeyError at import time if missing).
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])

# Result directories that are not per-model test folders; they are reported in
# a separate "non-model modules" table.
NON_MODEL_TEST_MODULES = [
    "benchmark",
    "deepspeed",
    "extended",
    "fixtures",
    "generation",
    "onnx",
    "optimization",
    "pipelines",
    "sagemaker",
    "trainer",
    "utils",
]
def handle_test_results(test_results):
    """Parse a pytest summary line into ``(failed, success, time_spent)``.

    The summary is expected to contain tokens like "3 failed," and "5 passed";
    the count preceding each keyword is accumulated.
    """
    tokens = test_results.split(" ")
    n_failed = 0
    n_passed = 0
    # When the output is short enough it is surrounded by "=" signs
    # ("== OUTPUT =="); when it is too long, those signs are absent — so the
    # duration is either the last or the second-to-last token.
    time_spent = tokens[-2] if "=" in tokens[-1] else tokens[-1]
    for position, token in enumerate(tokens):
        if "failed" in token:
            n_failed += int(tokens[position - 1])
        if "passed" in token:
            n_passed += int(tokens[position - 1])
    return n_failed, n_passed, time_spent
def handle_stacktraces(test_results):
    """Condense a pytest FAILURES section into "(line N) <message>" strings.

    Expected input layout:
        === FAILURES ===
        <path>:<line>: Error ...
        <path>:<line>: Error ...
        <empty line>
    """
    stacktraces = []
    # Drop the "=== FAILURES ===" banner and the trailing empty line.
    for frame in test_results.split("\n")[1:-1]:
        try:
            first_space = frame.index(" ")
            # "<path>:<line>:" precedes the first space; grab the line number.
            line_no = frame[:first_space].split(":")[-2]
            message = frame[first_space:]
            stacktraces.append(f"(line {line_no}) {message}")
        except Exception:
            stacktraces.append("Cannot retrieve error message.")
    return stacktraces
def dicts_to_sum(objects: Union[Dict[str, Dict], List[dict]]):
    """Element-wise sum of count dicts, returned as a `collections.Counter`.

    Accepts either a dict (its values are summed) or a list of dicts. Note:
    like `functools.reduce` without an initializer, an empty input raises
    TypeError.
    """
    sequences = objects.values() if isinstance(objects, dict) else objects
    counters = (collections.Counter(item) for item in sequences)
    return functools.reduce(operator.add, counters)
class Message:
    """Builds the Slack report (main summary message plus threaded replies) for a CI run."""

    def __init__(
        self, title: str, ci_title: str, model_results: Dict, additional_results: Dict, selected_warnings: List = None
    ):
        # Each entry of `model_results` / `additional_results` carries a
        # "success" int and a "failed" dict of per-category counts keyed by
        # "single" / "multi" / "unclassified" — inferred from the aggregation
        # below; confirm against the producer in the __main__ block.
        self.title = title
        self.ci_title = ci_title

        # Failures and success of the modeling tests
        self.n_model_success = sum(r["success"] for r in model_results.values())
        self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values())
        self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values())
        # Some suites do not have a distinction between single and multi GPU.
        self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values())
        self.n_model_failures = (
            self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
        )

        # Failures and success of the additional tests
        self.n_additional_success = sum(r["success"] for r in additional_results.values())
        all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()])
        self.n_additional_single_gpu_failures = all_additional_failures["single"]
        self.n_additional_multi_gpu_failures = all_additional_failures["multi"]
        self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"]
        self.n_additional_failures = (
            self.n_additional_single_gpu_failures
            + self.n_additional_multi_gpu_failures
            + self.n_additional_unknown_gpu_failures
        )

        # Results
        self.n_failures = self.n_model_failures + self.n_additional_failures
        self.n_success = self.n_model_success + self.n_additional_success
        self.n_tests = self.n_failures + self.n_success

        self.model_results = model_results
        self.additional_results = additional_results

        # Timestamp of the main Slack message, set by `post()`; replies are
        # threaded under it by `post_reply()`.
        self.thread_ts = None

        if selected_warnings is None:
            selected_warnings = []
        self.selected_warnings = selected_warnings
@property
def time(self) -> str:
all_results = [*self.model_results.values(), *self.additional_results.values()]
time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])]
total_secs = 0
for time in time_spent:
time_parts = time.split(":")
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts) == 1:
time_parts = [0, 0, time_parts[0]]
hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        """Slack header block showing the report title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def ci_title_section(self) -> Dict:
        """Slack section block showing the commit/PR title of this run."""
        return {"type": "section", "text": {"type": "mrkdwn", "text": self.ci_title}}

    @property
    def no_failures(self) -> Dict:
        """Slack section used when every test passed, with a link to the Action run."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        """Slack section summarizing total/model failure counts, with a link to the run."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n"
                    f"Number of model failures: {self.n_model_failures}.\n"
                    f"The suite ran in {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def warnings(self) -> Dict:
        """Slack section reporting how many selected warnings were collected."""
        # If something goes wrong, let's avoid the CI report failing to be sent.
        button_text = "Check warnings (Link not found)"
        # Use the workflow run link
        job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"
        # `github_actions_job_links` is a module-level mapping (job name ->
        # URL) set in the __main__ block, not visible here — presumably from
        # `get_job_links`; verify before relying on it.
        if "Extract warnings in CI artifacts" in github_actions_job_links:
            button_text = "Check warnings"
            # Use the actual job link
            job_link = f"{github_actions_job_links['Extract warnings in CI artifacts']}"

        huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x]
        text = f"There are {len(self.selected_warnings)} warnings being selected."
        text += f"\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`."

        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": text,
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": button_text, "emoji": True},
                "url": job_link,
            },
        }
@staticmethod
def get_device_report(report, rjust=6):
if "single" in report and "multi" in report:
return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
elif "single" in report:
return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | "
elif "multi" in report:
return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
    @property
    def category_failures(self) -> Dict:
        """Slack section aggregating failure counts per test category across all models."""
        model_failures = [v["failed"] for v in self.model_results.values()]

        category_failures = {}

        # Sum single/multi/unclassified counts per category over every model.
        for model_failure in model_failures:
            for key, value in model_failure.items():
                if key not in category_failures:
                    # Copy so later `+=` does not mutate the caller's dict.
                    category_failures[key] = dict(value)
                else:
                    category_failures[key]["unclassified"] += value["unclassified"]
                    category_failures[key]["single"] += value["single"]
                    category_failures[key]["multi"] += value["multi"]

        individual_reports = []
        for key, value in category_failures.items():
            device_report = self.get_device_report(value)

            # Only report categories that actually have failures.
            if sum(value.values()):
                if device_report:
                    individual_reports.append(f"{device_report}{key}")
                else:
                    individual_reports.append(key)

        header = "Single | Multi | Category\n"
        category_failures_report = prepare_reports(
            title="The following modeling categories had failures", header=header, reports=individual_reports
        )

        return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}}
    def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report):  # noqa
        """Return table rows whose failure counts are new or increased vs. the previous run.

        Both arguments are full reports produced by `prepare_reports` (title,
        ``` fence, header line, rows, closing fence); only the rows are
        compared.
        """
        # Remove the leading and trailing parts that don't contain failure count information.
        model_failures = curr_failure_report.split("\n")[3:-2]
        prev_model_failures = prev_failure_report.split("\n")[3:-2]
        entries_changed = set(model_failures).difference(prev_model_failures)

        # Row format is "<count> | <count> | ... | <name>"; map name -> counts.
        prev_map = {}
        for f in prev_model_failures:
            items = [x.strip() for x in f.split("| ")]
            prev_map[items[-1]] = [int(x) for x in items[:-1]]

        curr_map = {}
        for f in entries_changed:
            items = [x.strip() for x in f.split("| ")]
            curr_map[items[-1]] = [int(x) for x in items[:-1]]

        diff_map = {}
        for k, v in curr_map.items():
            if k not in prev_map:
                # New entry: report its absolute counts.
                diff_map[k] = v
            else:
                # Existing entry: keep it only if some count went up.
                diff = [x - y for x, y in zip(v, prev_map[k])]
                if max(diff) > 0:
                    diff_map[k] = diff

        entries_changed = []
        for model_name, diff_values in diff_map.items():
            diff = [str(x) for x in diff_values]
            # Prefix increases with "+" for readability.
            diff = [f"+{x}" if (x != "0" and not x.startswith("-")) else x for x in diff]
            diff = [x.rjust(9) for x in diff]
            device_report = " | ".join(diff) + " | "
            report = f"{device_report}{model_name}"
            entries_changed.append(report)

        entries_changed = sorted(entries_changed, key=lambda s: s.split("| ")[-1])

        return entries_changed
    @property
    def model_failures(self) -> Dict:
        """Slack sections for per-model and non-model failure tables.

        Also writes the full (untruncated) tables under `test_failure_tables/`
        so they can be uploaded as workflow artifacts, and — for the scheduled
        CI workflow only — appends a diff against the previous daily run.
        """
        # Obtain per-model failures
        def per_model_sum(model_category_dict):
            return dicts_to_sum(model_category_dict["failed"].values())

        failures = {}
        non_model_failures = {
            k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
        }

        for k, v in self.model_results.items():
            # NOTE(review): this `pass` is a no-op — non-model modules are NOT
            # skipped here and still flow into the PyTorch/TensorFlow split
            # below; confirm whether `continue` was intended.
            if k in NON_MODEL_TEST_MODULES:
                pass

            if sum(per_model_sum(v).values()):
                dict_failed = dict(v["failed"])
                # Splits the per-category counts into framework-specific vs. the rest.
                pytorch_specific_failures = dict_failed.pop("PyTorch")
                tensorflow_specific_failures = dict_failed.pop("TensorFlow")
                other_failures = dicts_to_sum(dict_failed.values())

                failures[k] = {
                    "PyTorch": pytorch_specific_failures,
                    "TensorFlow": tensorflow_specific_failures,
                    "other": other_failures,
                }

        model_reports = []
        other_module_reports = []
        for key, value in non_model_failures.items():
            if key in NON_MODEL_TEST_MODULES:
                device_report = self.get_device_report(value)

                if sum(value.values()):
                    if device_report:
                        report = f"{device_report}{key}"
                    else:
                        report = key

                    other_module_reports.append(report)

        for key, value in failures.items():
            device_report_values = [
                value["PyTorch"]["single"],
                value["PyTorch"]["multi"],
                value["TensorFlow"]["single"],
                value["TensorFlow"]["multi"],
                sum(value["other"].values()),
            ]

            if sum(device_report_values):
                device_report = " | ".join([str(x).rjust(9) for x in device_report_values]) + " | "
                report = f"{device_report}{key}"

                model_reports.append(report)

        # (Possibly truncated) reports for the current workflow run - to be sent to Slack channels
        model_header = "Single PT | Multi PT | Single TF | Multi TF | Other | Category\n"
        sorted_model_reports = sorted(model_reports, key=lambda s: s.split("| ")[-1])
        model_failures_report = prepare_reports(
            title="These following model modules had failures", header=model_header, reports=sorted_model_reports
        )

        module_header = "Single | Multi | Category\n"
        sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("| ")[-1])
        module_failures_report = prepare_reports(
            title="The following non-model modules had failures", header=module_header, reports=sorted_module_reports
        )

        # To be sent to Slack channels
        model_failure_sections = [
            {"type": "section", "text": {"type": "mrkdwn", "text": model_failures_report}},
            {"type": "section", "text": {"type": "mrkdwn", "text": module_failures_report}},
        ]

        # Save the complete (i.e. no truncation) failure tables (of the current workflow run)
        # (to be uploaded as artifacts)
        if not os.path.isdir(os.path.join(os.getcwd(), "test_failure_tables")):
            os.makedirs(os.path.join(os.getcwd(), "test_failure_tables"))

        model_failures_report = prepare_reports(
            title="These following model modules had failures",
            header=model_header,
            reports=sorted_model_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), "test_failure_tables/model_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(model_failures_report)

        module_failures_report = prepare_reports(
            title="The following non-model modules had failures",
            header=module_header,
            reports=sorted_module_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), "test_failure_tables/module_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(module_failures_report)

        target_workflow = "huggingface/transformers/.github/workflows/self-scheduled.yml@refs/heads/main"
        if os.environ.get("CI_WORKFLOW_REF") == target_workflow:
            # Get the last previously completed CI's failure tables
            artifact_names = ["test_failure_tables"]
            output_dir = os.path.join(os.getcwd(), "previous_reports")
            os.makedirs(output_dir, exist_ok=True)
            prev_tables = get_last_daily_ci_reports(
                artifact_names=artifact_names, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"]
            )

            # if the last run produces artifact named `test_failure_tables`
            if (
                "test_failure_tables" in prev_tables
                and "model_failures_report.txt" in prev_tables["test_failure_tables"]
            ):
                # Compute the difference of the previous/current (model failure) table
                prev_model_failures = prev_tables["test_failure_tables"]["model_failures_report.txt"]
                entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures)
                if len(entries_changed) > 0:
                    # Save the complete difference
                    diff_report = prepare_reports(
                        title="Changed model modules failures",
                        header=model_header,
                        reports=entries_changed,
                        to_truncate=False,
                    )
                    file_path = os.path.join(os.getcwd(), "test_failure_tables/changed_model_failures_report.txt")
                    with open(file_path, "w", encoding="UTF-8") as fp:
                        fp.write(diff_report)

                    # To be sent to Slack channels
                    diff_report = prepare_reports(
                        title="*Changed model modules failures*",
                        header=model_header,
                        reports=entries_changed,
                    )
                    model_failure_sections.append(
                        {"type": "section", "text": {"type": "mrkdwn", "text": diff_report}},
                    )

        return model_failure_sections
    @property
    def additional_failures(self) -> Dict:
        """Slack section listing non-modeling suites with failures or hard errors."""
        failures = {k: v["failed"] for k, v in self.additional_results.items()}
        errors = {k: v["error"] for k, v in self.additional_results.items()}

        individual_reports = []
        for key, value in failures.items():
            device_report = self.get_device_report(value)

            # A suite is reported if it has failed tests or errored out entirely.
            if sum(value.values()) or errors[key]:
                report = f"{key}"
                if errors[key]:
                    report = f"[Errored out] {report}"
                if device_report:
                    report = f"{device_report}{report}"

                individual_reports.append(report)

        header = "Single | Multi | Category\n"
        failures_report = prepare_reports(
            title="The following non-modeling tests had failures", header=header, reports=individual_reports
        )

        return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}}

    @property
    def payload(self) -> str:
        """JSON-encoded list of Slack blocks composing the main report message."""
        blocks = [self.header]

        if self.ci_title:
            blocks.append(self.ci_title_section)

        if self.n_model_failures > 0 or self.n_additional_failures > 0:
            blocks.append(self.failures)

        if self.n_model_failures > 0:
            blocks.append(self.category_failures)
            # Skip empty table sections so Slack does not reject the payload.
            for block in self.model_failures:
                if block["text"]["text"]:
                    blocks.append(block)

        if self.n_additional_failures > 0:
            blocks.append(self.additional_failures)

        if self.n_model_failures == 0 and self.n_additional_failures == 0:
            blocks.append(self.no_failures)

        if len(self.selected_warnings) > 0:
            blocks.append(self.warnings)

        return json.dumps(blocks)
    @staticmethod
    def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False):
        """Post a minimal Slack message when the CI run failed before any test ran.

        The three boolean flags select which failure explanation is shown;
        when the runners were unavailable, the offline runner list is read
        from the OFFLINE_RUNNERS environment variable (JSON).
        """
        blocks = []
        title_block = {"type": "header", "text": {"type": "plain_text", "text": title}}
        blocks.append(title_block)

        if ci_title:
            ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}}
            blocks.append(ci_title_block)

        offline_runners = []
        if runner_not_available:
            text = "💔 CI runners are not available! Tests are not run. 😭"
            result = os.environ.get("OFFLINE_RUNNERS")
            if result is not None:
                offline_runners = json.loads(result)
        elif runner_failed:
            text = "💔 CI runners have problems! Tests are not run. 😭"
        elif setup_failed:
            text = "💔 Setup job failed. Tests are not run. 😭"
        else:
            text = "💔 There was an issue running the tests. 😭"

        error_block_1 = {
            "type": "header",
            "text": {
                "type": "plain_text",
                "text": text,
            },
        }

        # `text` is reused for the second block (and as the message fallback).
        text = ""
        if len(offline_runners) > 0:
            text = "\n • " + "\n • ".join(offline_runners)
            text = f"The following runners are offline:\n{text}\n\n"
        text += "🙏 Let's fix it ASAP! 🙏"

        error_block_2 = {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": text,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        blocks.extend([error_block_1, error_block_2])

        payload = json.dumps(blocks)

        print("Sending the following payload")
        print(json.dumps({"blocks": blocks}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
            text=text,
            blocks=payload,
        )
    def post(self):
        """Send the main report message and remember its timestamp for threading."""
        payload = self.payload
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(payload)}))

        # `text` is the plain-text fallback shown in notifications.
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
            blocks=payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_result, failures, device, text):
        """
        failures: A list with elements of the form {"line": full test name, "trace": error trace}
        """
        # `text` must be less than 3001 characters in Slack SDK
        # keep some room for adding "[Truncated]" when necessary
        MAX_ERROR_TEXT = 3000 - len("[Truncated]")

        failure_text = ""
        for idx, error in enumerate(failures):
            new_text = failure_text + f'*{error["line"]}*\n_{error["trace"]}_\n\n'
            if len(new_text) > MAX_ERROR_TEXT:
                # `failure_text` here has length <= 3000
                failure_text = failure_text + "[Truncated]"
                break
            # `failure_text` here has length <= MAX_ERROR_TEXT
            failure_text = new_text

        title = job_name
        if device is not None:
            title += f" ({device}-gpu)"

        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending)
        # Currently we get the device from a job's artifact name.
        # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`.
        # This could be done by adding `machine_type` in a job's `strategy`.
        # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`)
        if job_result["job_link"] is not None and job_result["job_link"][device] is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_result["job_link"][device],
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing job/device under the main message."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                for device, failures in job_result["failures"].items():
                    text = "\n".join(
                        sorted([f"*{k}*: {v[device]}" for k, v in job_result["failed"].items() if v[device]])
                    )

                    blocks = self.get_reply_blocks(job, job_result, failures, device, text=text)

                    print("Sending the following reply")
                    print(json.dumps({"blocks": blocks}))

                    client.chat_postMessage(
                        channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
                        text=f"Results for {job}",
                        blocks=blocks,
                        thread_ts=self.thread_ts["ts"],
                    )
                    # Rate-limit friendliness towards the Slack API.
                    time.sleep(1)

        for job, job_result in self.additional_results.items():
            if len(job_result["failures"]):
                for device, failures in job_result["failures"].items():
                    blocks = self.get_reply_blocks(
                        job,
                        job_result,
                        failures,
                        device,
                        text=f'Number of failures: {job_result["failed"][device]}',
                    )

                    print("Sending the following reply")
                    print(json.dumps({"blocks": blocks}))

                    client.chat_postMessage(
                        channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
                        text=f"Results for {job}",
                        blocks=blocks,
                        thread_ts=self.thread_ts["ts"],
                    )
                    time.sleep(1)
def retrieve_artifact(artifact_path: str, gpu: Optional[str]):
    """Read every file in `artifact_path` into a dict keyed by file stem.

    Raises ValueError for an invalid `gpu` value or an undecodable file;
    returns an empty dict when the path does not exist.
    """
    if gpu not in [None, "single", "multi"]:
        raise ValueError(f"Invalid GPU for artifact. Passed GPU: `{gpu}`.")

    contents = {}
    if not os.path.exists(artifact_path):
        return contents

    for entry in os.listdir(artifact_path):
        full_path = os.path.join(artifact_path, entry)
        try:
            with open(full_path) as f:
                contents[entry.split(".")[0]] = f.read()
        except UnicodeDecodeError as e:
            raise ValueError(f"Could not open {full_path}.") from e

    return contents
def retrieve_available_artifacts():
    """Scan the current working directory and index downloaded artifact folders.

    Folder names are normalized: an optional "_postfix_..." suffix is dropped
    and a "single-gpu"/"multi-gpu" prefix is stripped (recorded on the
    Artifact instead). Returns a dict mapping normalized name -> Artifact.
    """

    class Artifact:
        # Lightweight record of one artifact name together with the on-disk
        # paths (and their gpu flavor) it was found under.
        def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False):
            self.name = name
            self.single_gpu = single_gpu
            self.multi_gpu = multi_gpu
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str, gpu: str = None):
            self.paths.append({"name": self.name, "path": path, "gpu": gpu})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory

        name_parts = artifact_name.split("_postfix_")
        if len(name_parts) > 1:
            artifact_name = name_parts[0]

        if artifact_name.startswith("single-gpu"):
            # Strip "single-gpu-" (prefix plus separator character).
            artifact_name = artifact_name[len("single-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].single_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="single")

        elif artifact_name.startswith("multi-gpu"):
            # Strip "multi-gpu-" (prefix plus separator character).
            artifact_name = artifact_name[len("multi-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].multi_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="multi")
        else:
            # Artifacts without a device prefix have no gpu flavor.
            if artifact_name not in _available_artifacts:
                _available_artifacts[artifact_name] = Artifact(artifact_name)

            _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
def prepare_reports(title, header, reports, to_truncate=True):
    """Assemble `reports` under `title`/`header` into a Slack-friendly code block.

    Slack's `text` field must stay below 3001 characters, so when `to_truncate` is
    set, reports are appended one at a time and the result is capped with
    "[Truncated]" as soon as the next addition would overflow.

    Returns:
        The formatted string, or "" when `reports` is empty.
    """
    max_error_text = 3000 - len("[Truncated]") if to_truncate else float("inf")
    report = ""
    if not reports:
        return report
    for count in range(1, len(reports) + 1):
        body = header + "\n".join(reports[:count])
        candidate = f"{title}:\n```\n{body}\n```\n"
        if len(candidate) > max_error_text:
            # The previously accepted `report` is guaranteed to fit within 3000 chars.
            report = report + "[Truncated]"
            break
        report = candidate
    return report
if __name__ == "__main__":
    # Entry point of the CI Slack-reporting script: gather job statuses and test
    # artifacts from a GitHub Actions run and post a summary message to Slack.
    runner_status = os.environ.get("RUNNER_STATUS")
    runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
    setup_status = os.environ.get("SETUP_STATUS")
    # An unset status variable counts as success; only an explicit value different
    # from "success" marks the corresponding stage as failed/unavailable.
    runner_not_available = True if runner_status is not None and runner_status != "success" else False
    runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False
    setup_failed = True if setup_status is not None and setup_status != "success" else False
    org = "huggingface"
    repo = "transformers"
    repository_full_name = f"{org}/{repo}"
    # This env. variable is set in workflow file (under the job `send_results`).
    ci_event = os.environ["CI_EVENT"]
    # To find the PR number in a commit title, for example, `Add AwesomeFormer model (#99999)`
    pr_number_re = re.compile(r"\(#(\d+)\)$")
    title = f"🤗 Results of the {ci_event} tests."
    # Add Commit/PR title with a link for push CI
    # (check the title in 2 env. variables - depending on the CI is triggered via `push` or `workflow_run` event)
    ci_title_push = os.environ.get("CI_TITLE_PUSH")
    ci_title_workflow_run = os.environ.get("CI_TITLE_WORKFLOW_RUN")
    ci_title = ci_title_push if ci_title_push else ci_title_workflow_run
    ci_sha = os.environ.get("CI_SHA")
    ci_url = None
    if ci_sha:
        ci_url = f"https://github.com/{repository_full_name}/commit/{ci_sha}"
    if ci_title is not None:
        if ci_url is None:
            raise ValueError(
                "When a title is found (`ci_title`), it means a `push` event or a `workflow_run` even (triggered by "
                "another `push` event), and the commit SHA has to be provided in order to create the URL to the "
                "commit page."
            )
        # Keep only the first line of the commit message as the title.
        ci_title = ci_title.strip().split("\n")[0].strip()
        # Retrieve the PR title and author login to complete the report
        commit_number = ci_url.split("/")[-1]
        ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/commits/{commit_number}"
        ci_details = requests.get(ci_detail_url).json()
        # NOTE(review): GitHub may return `"author": null` for commits whose author has
        # no linked GitHub account, which would raise a TypeError here — confirm.
        ci_author = ci_details["author"]["login"]
        merged_by = None
        # Find the PR number (if any) and change the url to the actual PR page.
        numbers = pr_number_re.findall(ci_title)
        if len(numbers) > 0:
            pr_number = numbers[0]
            ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/pulls/{pr_number}"
            ci_details = requests.get(ci_detail_url).json()
            ci_author = ci_details["user"]["login"]
            ci_url = f"https://github.com/{repository_full_name}/pull/{pr_number}"
            merged_by = ci_details["merged_by"]["login"]
        # Format the title as a Slack hyperlink: <url|text>.
        if merged_by is None:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author}"
        else:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author} | Merged by: {merged_by}"
    elif ci_sha:
        ci_title = f"<{ci_url}|commit: {ci_sha}>"
    else:
        ci_title = ""
    # If the infrastructure itself failed, report that and stop: there are no test
    # results to collect.
    if runner_not_available or runner_failed or setup_failed:
        Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
        exit(0)
    # The first CLI argument is a Python-literal list of model test folders,
    # e.g. "['models/bert', 'models/gpt2']".
    arguments = sys.argv[1:][0]
    try:
        models = ast.literal_eval(arguments)
        # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
        models = [x.replace("models/", "models_") for x in models]
    except SyntaxError:
        Message.error_out(title, ci_title)
        raise ValueError("Errored out.")
    github_actions_job_links = get_job_links(
        workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"]
    )
    available_artifacts = retrieve_available_artifacts()
    modeling_categories = [
        "PyTorch",
        "TensorFlow",
        "Flax",
        "Tokenizers",
        "Pipelines",
        "Trainer",
        "ONNX",
        "Auto",
        "Unclassified",
    ]
    # This dict will contain all the information relative to each model:
    # - Failures: the total, as well as the number of failures per-category defined above
    # - Success: total
    # - Time spent: as a comma-separated list of elapsed time
    # - Failures: as a line-break separated list of errors
    model_results = {
        model: {
            "failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in modeling_categories},
            "success": 0,
            "time_spent": "",
            "failures": {},
            "job_link": {},
        }
        for model in models
        if f"run_all_tests_gpu_{model}_test_reports" in available_artifacts
    }
    unclassified_model_failures = []
    # This prefix is used to get job links below. For past CI, we use `workflow_call`, which changes the job names from
    # `Model tests (...)` to `PyTorch 1.5 / Model tests (...)` for example.
    job_name_prefix = ""
    if ci_event.startswith("Past CI - "):
        framework, version = ci_event.replace("Past CI - ", "").split("-")
        framework = "PyTorch" if framework == "pytorch" else "TensorFlow"
        job_name_prefix = f"{framework} {version}"
    elif ci_event.startswith("Nightly CI"):
        job_name_prefix = "Nightly CI"
    # Walk every model's downloaded artifacts and aggregate success counts, timings
    # and per-category failure counts.
    for model in model_results.keys():
        for artifact_path in available_artifacts[f"run_all_tests_gpu_{model}_test_reports"].paths:
            artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"])
            if "stats" in artifact:
                # Link to the GitHub Action job
                # The job names use `matrix.folder` which contain things like `models/bert` instead of `models_bert`
                job_name = f"Model tests ({model.replace('models_', 'models/')}, {artifact_path['gpu']}-gpu)"
                if job_name_prefix:
                    job_name = f"{job_name_prefix} / {job_name}"
                model_results[model]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(job_name)
                failed, success, time_spent = handle_test_results(artifact["stats"])
                model_results[model]["success"] += success
                model_results[model]["time_spent"] += time_spent[1:-1] + ", "
                stacktraces = handle_stacktraces(artifact["failures_line"])
                for line in artifact["summary_short"].split("\n"):
                    if line.startswith("FAILED "):
                        line = line[len("FAILED ") :]
                        line = line.split()[0].replace("\n", "")
                        if artifact_path["gpu"] not in model_results[model]["failures"]:
                            model_results[model]["failures"][artifact_path["gpu"]] = []
                        model_results[model]["failures"][artifact_path["gpu"]].append(
                            {"line": line, "trace": stacktraces.pop(0)}
                        )
                        # Classify the failure from the test file name. Order matters:
                        # e.g. `test_modeling_tf_` must be checked before the generic
                        # `test_modeling`.
                        if re.search("test_modeling_tf_", line):
                            model_results[model]["failed"]["TensorFlow"][artifact_path["gpu"]] += 1
                        elif re.search("test_modeling_flax_", line):
                            model_results[model]["failed"]["Flax"][artifact_path["gpu"]] += 1
                        elif re.search("test_modeling", line):
                            model_results[model]["failed"]["PyTorch"][artifact_path["gpu"]] += 1
                        elif re.search("test_tokenization", line):
                            model_results[model]["failed"]["Tokenizers"][artifact_path["gpu"]] += 1
                        elif re.search("test_pipelines", line):
                            model_results[model]["failed"]["Pipelines"][artifact_path["gpu"]] += 1
                        elif re.search("test_trainer", line):
                            model_results[model]["failed"]["Trainer"][artifact_path["gpu"]] += 1
                        elif re.search("onnx", line):
                            model_results[model]["failed"]["ONNX"][artifact_path["gpu"]] += 1
                        elif re.search("auto", line):
                            model_results[model]["failed"]["Auto"][artifact_path["gpu"]] += 1
                        else:
                            model_results[model]["failed"]["Unclassified"][artifact_path["gpu"]] += 1
                            unclassified_model_failures.append(line)
    # Additional runs
    additional_files = {
        "Examples directory": "run_examples_gpu",
        "PyTorch pipelines": "run_tests_torch_pipeline_gpu",
        "TensorFlow pipelines": "run_tests_tf_pipeline_gpu",
        "Torch CUDA extension tests": "run_tests_torch_cuda_extensions_gpu_test_reports",
    }
    # These suites only run on the scheduled (daily) CI, not on push/nightly/past CI.
    if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI"):
        del additional_files["Examples directory"]
        del additional_files["PyTorch pipelines"]
        del additional_files["TensorFlow pipelines"]
    additional_results = {
        key: {
            "failed": {"unclassified": 0, "single": 0, "multi": 0},
            "success": 0,
            "time_spent": "",
            "error": False,
            "failures": {},
            "job_link": {},
        }
        for key in additional_files.keys()
    }
    for key in additional_results.keys():
        # If a whole suite of test fails, the artifact isn't available.
        if additional_files[key] not in available_artifacts:
            additional_results[key]["error"] = True
            continue
        for artifact_path in available_artifacts[additional_files[key]].paths:
            # Link to the GitHub Action job
            job_name = key
            if artifact_path["gpu"] is not None:
                job_name = f"{key} ({artifact_path['gpu']}-gpu)"
            if job_name_prefix:
                job_name = f"{job_name_prefix} / {job_name}"
            additional_results[key]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(job_name)
            artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"])
            stacktraces = handle_stacktraces(artifact["failures_line"])
            failed, success, time_spent = handle_test_results(artifact["stats"])
            additional_results[key]["failed"][artifact_path["gpu"] or "unclassified"] += failed
            additional_results[key]["success"] += success
            additional_results[key]["time_spent"] += time_spent[1:-1] + ", "
            if len(artifact["errors"]):
                additional_results[key]["error"] = True
            if failed:
                for line in artifact["summary_short"].split("\n"):
                    if line.startswith("FAILED "):
                        line = line[len("FAILED ") :]
                        line = line.split()[0].replace("\n", "")
                        if artifact_path["gpu"] not in additional_results[key]["failures"]:
                            additional_results[key]["failures"][artifact_path["gpu"]] = []
                        additional_results[key]["failures"][artifact_path["gpu"]].append(
                            {"line": line, "trace": stacktraces.pop(0)}
                        )
    # Warnings selected during the run, when the dedicated artifact exists.
    selected_warnings = []
    if "warnings_in_ci" in available_artifacts:
        directory = available_artifacts["warnings_in_ci"].paths[0]["path"]
        with open(os.path.join(directory, "selected_warnings.json")) as fp:
            selected_warnings = json.load(fp)
    message = Message(title, ci_title, model_results, additional_results, selected_warnings=selected_warnings)
    # send report only if there is any failure (for push CI)
    if message.n_failures or ci_event != "push":
        message.post()
        message.post_reply()
| 41,883 | 39.545983 | 119 | py |
transformers | transformers-main/utils/add_pipeline_model_mapping_to_test.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to add and/or update the attribute `pipeline_model_mapping` in model test files.
This script will be (mostly) used in the following 2 situations:
- run within a (scheduled) CI job to:
- check if model test files in the library have updated `pipeline_model_mapping`,
- and/or update test files and (possibly) open a GitHub pull request automatically
- being run by a `transformers` member to quickly check and update some particular test file(s)
This script is **NOT** intended to be run (manually) by community contributors.
"""
import argparse
import glob
import inspect
import os
import re
import unittest
from get_test_info import get_test_classes
from tests.test_pipeline_mixin import pipeline_test_mapping
PIPELINE_TEST_MAPPING = {}
for task, _ in pipeline_test_mapping.items():
PIPELINE_TEST_MAPPING[task] = {"pt": None, "tf": None}
# DO **NOT** add item to this set (unless the reason is approved)
TEST_FILE_TO_IGNORE = {
"tests/models/esm/test_modeling_esmfold.py", # The pipeline test mapping is added to `test_modeling_esm.py`
}
def get_framework(test_class):
    """Infer the framework ("pt", "tf" or "flax") from the test class' direct bases."""
    base_names = {base.__name__ for base in test_class.__bases__}
    # Check PyTorch first, mirroring the mixin precedence used elsewhere.
    if "ModelTesterMixin" in base_names:
        return "pt"
    if "TFModelTesterMixin" in base_names:
        return "tf"
    if "FlaxModelTesterMixin" in base_names:
        return "flax"
    return None
def get_mapping_for_task(task, framework):
    """Get mappings defined in `XXXPipelineTests` for the task `task`, memoized per framework."""
    # Serve from the module-level cache when already computed.
    cached = PIPELINE_TEST_MAPPING[task].get(framework, None)
    if cached is not None:
        return cached

    pipeline_test_class = pipeline_test_mapping[task]["test"]
    attr_by_framework = {"pt": "model_mapping", "tf": "tf_model_mapping"}
    mapping = None
    if framework in attr_by_framework:
        mapping = getattr(pipeline_test_class, attr_by_framework[framework], None)
    if mapping is not None:
        # Materialize the lazy mapping into a plain dict.
        mapping = dict(mapping.items())

    # Cache for subsequent calls (possibly caching `None`).
    PIPELINE_TEST_MAPPING[task][framework] = mapping
    return mapping
def get_model_for_pipeline_test(test_class, task):
    """Get the model architecture(s) related to the test class `test_class` for a pipeline `task`."""
    framework = get_framework(test_class)
    if framework is None:
        return None
    task_mapping = get_mapping_for_task(task, framework)
    if task_mapping is None:
        return None

    unique_configs = {model_class.config_class for model_class in test_class.all_model_classes}
    if len(unique_configs) != 1:
        raise ValueError("There should be exactly one configuration class from `test_class.all_model_classes`.")
    (config_class,) = unique_configs

    # The entry may be a single class or (rarely) a list/tuple of model classes.
    models = task_mapping.get(config_class, None)
    if isinstance(models, (tuple, list)):
        models = sorted(models, key=lambda m: m.__name__)
    return models
def get_pipeline_model_mapping(test_class):
    """Get `pipeline_model_mapping` for `test_class`: task name -> model class(es), sorted by task."""
    entries = []
    for task in pipeline_test_mapping:
        model = get_model_for_pipeline_test(test_class, task)
        if model is not None:
            entries.append((task, model))
    entries.sort(key=lambda item: item[0])
    return dict(entries)
def get_pipeline_model_mapping_string(test_class):
    """Get `pipeline_model_mapping` for `test_class` as a string (to be added to the test file).

    This will be a 1-line string. After this is added to a test file, `make style` will format it beautifully.
    """
    framework = get_framework(test_class)
    if framework == "pt":
        framework = "torch"
    default_value = "{}"

    mapping = get_pipeline_model_mapping(test_class)
    if not mapping:
        return ""

    entries = []
    for task, model_classes in mapping.items():
        if isinstance(model_classes, (tuple, list)):
            # Several architectures map to this task: render them as a tuple literal.
            rendered = "(" + ", ".join([cls.__name__ for cls in model_classes]) + ")"
        else:
            rendered = model_classes.__name__
        entries.append(f'"{task}": {rendered}')

    body = "{" + ", ".join(entries) + "}"
    return f"pipeline_model_mapping = {body} if is_{framework}_available() else {default_value}"
def is_valid_test_class(test_class):
    """Restrict to `XXXModelTesterMixin` and should be a subclass of `unittest.TestCase`."""
    if not issubclass(test_class, unittest.TestCase):
        return False
    mixin_names = {"ModelTesterMixin", "TFModelTesterMixin", "FlaxModelTesterMixin"}
    # At least one direct base must be one of the framework tester mixins.
    return any(base.__name__ in mixin_names for base in test_class.__bases__)
def find_test_class(test_file):
    """Find a test class in `test_file` to which we will add `pipeline_model_mapping`."""
    candidates = [cls for cls in get_test_classes(test_file) if is_valid_test_class(cls)]

    # Prefer a class that already defines `pipeline_model_mapping`.
    for candidate in candidates:
        if getattr(candidate, "pipeline_model_mapping", None) is not None:
            return candidate

    # Otherwise fall back to the class with the shortest name (just a heuristic).
    if candidates:
        return min(candidates, key=lambda cls: (len(cls.__name__), cls.__name__))
    return None
def find_block_ending(lines, start_idx, indent_level):
    """Return the (inclusive) index of the last line of the block starting at `start_idx`.

    A line still belongs to the block when it is indented deeper than `indent_level`,
    or when it is a lone closing parenthesis at that same indentation.
    """
    end_idx = start_idx
    for offset, line in enumerate(lines[start_idx:]):
        indent = len(line) - len(line.lstrip())
        still_inside = indent > indent_level or (indent == indent_level and line.strip() == ")")
        if offset == 0 or still_inside:
            end_idx = start_idx + offset
        else:
            # First later line at or below the block's indentation ends the block.
            break
    return end_idx
def add_pipeline_model_mapping(test_class, overwrite=False):
    """Add `pipeline_model_mapping` to `test_class` and rewrite its source file in place.

    Args:
        test_class: A `XXXModelTesterMixin` test class to update.
        overwrite: If `True`, replace an already-defined `pipeline_model_mapping`.

    Returns:
        A tuple `(line_added, target_idx)`: the (indented) line that was inserted and
        the index (in the class' source lines) after which it was inserted, or
        `("", -1)` when nothing was added.
        Fix: the original returned a bare `line_to_add` on success while every early
        exit returned the `("", -1)` tuple; the return shape is now consistent.
    """
    if getattr(test_class, "pipeline_model_mapping", None) is not None:
        if not overwrite:
            return "", -1
    line_to_add = get_pipeline_model_mapping_string(test_class)
    if len(line_to_add) == 0:
        return "", -1
    line_to_add = line_to_add + "\n"
    # The code defined the class `test_class`
    class_lines, class_start_line_no = inspect.getsourcelines(test_class)
    # `inspect` gives the code for an object, including decorator(s) if any.
    # We (only) need the exact line of the class definition.
    for idx, line in enumerate(class_lines):
        if line.lstrip().startswith("class "):
            class_lines = class_lines[idx:]
            class_start_line_no += idx
            break
    class_end_line_no = class_start_line_no + len(class_lines) - 1
    # The index in `class_lines` that starts the definition of `all_model_classes`, `all_generative_model_classes` or
    # `pipeline_model_mapping`. This assumes they are defined in such order, and we take the start index of the last
    # block that appears in a `test_class`.
    start_idx = None
    # The indent level of the line at `class_lines[start_idx]` (if defined)
    indent_level = 0
    # To record if `pipeline_model_mapping` is found in `test_class`.
    def_line = None
    for idx, line in enumerate(class_lines):
        if line.strip().startswith("all_model_classes = "):
            indent_level = len(line) - len(line.lstrip())
            start_idx = idx
        elif line.strip().startswith("all_generative_model_classes = "):
            indent_level = len(line) - len(line.lstrip())
            start_idx = idx
        elif line.strip().startswith("pipeline_model_mapping = "):
            indent_level = len(line) - len(line.lstrip())
            start_idx = idx
            def_line = line
            break
    if start_idx is None:
        return "", -1
    # Find the ending index (inclusive) of the above found block.
    end_idx = find_block_ending(class_lines, start_idx, indent_level)
    # Extract `is_xxx_available()` from existing blocks: some models require specific libraries like `timm` and use
    # `is_timm_available()` instead of `is_torch_available()`.
    # Keep leading and trailing whitespaces
    r = re.compile(r"\s(is_\S+?_available\(\))\s")
    for line in class_lines[start_idx : end_idx + 1]:
        backend_condition = r.search(line)
        if backend_condition is not None:
            # replace the leading and trailing whitespaces to the space character " ".
            target = " " + backend_condition[0][1:-1] + " "
            line_to_add = r.sub(target, line_to_add)
            break
    if def_line is None:
        # `pipeline_model_mapping` is not defined. The target index is set to the ending index (inclusive) of
        # `all_model_classes` or `all_generative_model_classes`.
        target_idx = end_idx
    else:
        # `pipeline_model_mapping` is defined. The target index is set to be one **BEFORE** its start index.
        target_idx = start_idx - 1
        # mark the lines of the currently existing `pipeline_model_mapping` to be removed.
        for idx in range(start_idx, end_idx + 1):
            # These lines are going to be removed before writing to the test file.
            class_lines[idx] = None  # noqa
    # Make sure the test class is a subclass of `PipelineTesterMixin`.
    parent_classes = [x.__name__ for x in test_class.__bases__]
    if "PipelineTesterMixin" not in parent_classes:
        # Put `PipelineTesterMixin` just before `unittest.TestCase`
        _parent_classes = [x for x in parent_classes if x != "TestCase"] + ["PipelineTesterMixin"]
        if "TestCase" in parent_classes:
            # Here we **assume** the original string is always with `unittest.TestCase`.
            _parent_classes.append("unittest.TestCase")
        parent_classes = ", ".join(_parent_classes)
        for idx, line in enumerate(class_lines):
            # Find the ending of the declaration of `test_class`
            if line.strip().endswith("):"):
                # mark the lines of the declaration of `test_class` to be removed
                for _idx in range(idx + 1):
                    class_lines[_idx] = None  # noqa
                break
        # Add the new, one-line, class declaration for `test_class`
        class_lines[0] = f"class {test_class.__name__}({parent_classes}):\n"
    # Add indentation
    line_to_add = " " * indent_level + line_to_add
    # Insert `pipeline_model_mapping` to `class_lines`.
    # (The line at `target_idx` should be kept by definition!)
    class_lines = class_lines[: target_idx + 1] + [line_to_add] + class_lines[target_idx + 1 :]
    # Remove the lines that are marked to be removed
    class_lines = [x for x in class_lines if x is not None]
    # Move from test class to module (in order to write to the test file)
    module_lines = inspect.getsourcelines(inspect.getmodule(test_class))[0]
    # Be careful with the 1-off between line numbers and array indices
    module_lines = module_lines[: class_start_line_no - 1] + class_lines + module_lines[class_end_line_no:]
    code = "".join(module_lines)
    module_file = inspect.getsourcefile(test_class)  # fixed typo: was `moddule_file`
    with open(module_file, "w", encoding="UTF-8", newline="\n") as fp:
        fp.write(code)
    # Keep the return shape consistent with the early-exit branches above.
    return line_to_add, target_idx
def add_pipeline_model_mapping_to_test_file(test_file, overwrite=False):
    """Add `pipeline_model_mapping` to `test_file` (no-op when no suitable class is found)."""
    target_class = find_test_class(test_file)
    if target_class is not None:
        add_pipeline_model_mapping(target_class, overwrite=overwrite)
if __name__ == "__main__":
    # CLI entry point: update one test file (--test_file) or all of them (--all).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--test_file", type=str, help="A path to the test file, starting with the repository's `tests` directory."
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help="If to check and modify all test files.",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="If to overwrite a test class if it has already defined `pipeline_model_mapping`.",
    )
    args = parser.parse_args()

    if not args.all and not args.test_file:
        raise ValueError("Please specify either `test_file` or pass `--all` to check/modify all test files.")
    elif args.all and args.test_file:
        raise ValueError("Only one of `--test_file` and `--all` could be specified.")

    test_files = []
    if args.test_file:
        test_files = [args.test_file]
    else:
        pattern = os.path.join("tests", "models", "**", "test_modeling_*.py")
        for test_file in glob.glob(pattern):
            # `Flax` is not concerned at this moment
            # Fix: `glob` yields full paths like `tests/models/x/test_modeling_flax_y.py`,
            # so the prefix check must be done on the file name, not the whole path
            # (the original `test_file.startswith(...)` never matched anything).
            if not os.path.basename(test_file).startswith("test_modeling_flax_"):
                test_files.append(test_file)

    for test_file in test_files:
        if test_file in TEST_FILE_TO_IGNORE:
            print(f"[SKIPPED] {test_file} is skipped as it is in `TEST_FILE_TO_IGNORE` in the file {__file__}.")
            continue
        add_pipeline_model_mapping_to_test_file(test_file, overwrite=args.overwrite)
| 13,870 | 40.038462 | 117 | py |
transformers | transformers-main/utils/check_repo.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import sys
import warnings
from collections import OrderedDict
from difflib import get_close_matches
from pathlib import Path
from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.models.auto import get_values
from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
from transformers.models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING_NAMES
from transformers.models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING_NAMES
from transformers.models.auto.processing_auto import PROCESSOR_MAPPING_NAMES
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
from transformers.utils import ENV_VARS_TRUE_VALUES, direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source/en"
# Update this list with models that are supposed to be private.
# Class names that exist in modeling files but are internal building blocks of public models;
# `is_a_private_model` consults this list so they are exempt from the main-init and auto-mapping checks.
PRIVATE_MODELS = [
    "AltRobertaModel",
    "DPRSpanPredictor",
    "LongT5Stack",
    "RealmBertModel",
    "T5Stack",
    "MT5Stack",
    "UMT5Stack",
    "SwitchTransformersStack",
    "TFDPRSpanPredictor",
    "MaskFormerSwinModel",
    "MaskFormerSwinPreTrainedModel",
    "BridgeTowerTextModel",
    "BridgeTowerVisionModel",
]
# Update this list for models that are not tested with a comment explaining the reason it should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
# models to ignore for not tested
"InstructBlipQFormerModel", # Building part of bigger (tested) model.
"NllbMoeDecoder",
"NllbMoeEncoder",
"UMT5EncoderModel", # Building part of bigger (tested) model.
"LlamaDecoder", # Building part of bigger (tested) model.
"Blip2QFormerModel", # Building part of bigger (tested) model.
"DetaEncoder", # Building part of bigger (tested) model.
"DetaDecoder", # Building part of bigger (tested) model.
"ErnieMForInformationExtraction",
"GraphormerEncoder", # Building part of bigger (tested) model.
"GraphormerDecoderHead", # Building part of bigger (tested) model.
"CLIPSegDecoder", # Building part of bigger (tested) model.
"TableTransformerEncoder", # Building part of bigger (tested) model.
"TableTransformerDecoder", # Building part of bigger (tested) model.
"TimeSeriesTransformerEncoder", # Building part of bigger (tested) model.
"TimeSeriesTransformerDecoder", # Building part of bigger (tested) model.
"InformerEncoder", # Building part of bigger (tested) model.
"InformerDecoder", # Building part of bigger (tested) model.
"AutoformerEncoder", # Building part of bigger (tested) model.
"AutoformerDecoder", # Building part of bigger (tested) model.
"JukeboxVQVAE", # Building part of bigger (tested) model.
"JukeboxPrior", # Building part of bigger (tested) model.
"DeformableDetrEncoder", # Building part of bigger (tested) model.
"DeformableDetrDecoder", # Building part of bigger (tested) model.
"OPTDecoder", # Building part of bigger (tested) model.
"FlaxWhisperDecoder", # Building part of bigger (tested) model.
"FlaxWhisperEncoder", # Building part of bigger (tested) model.
"WhisperDecoder", # Building part of bigger (tested) model.
"WhisperEncoder", # Building part of bigger (tested) model.
"DecisionTransformerGPT2Model", # Building part of bigger (tested) model.
"SegformerDecodeHead", # Building part of bigger (tested) model.
"PLBartEncoder", # Building part of bigger (tested) model.
"PLBartDecoder", # Building part of bigger (tested) model.
"PLBartDecoderWrapper", # Building part of bigger (tested) model.
"BigBirdPegasusEncoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model.
"DetrEncoder", # Building part of bigger (tested) model.
"DetrDecoder", # Building part of bigger (tested) model.
"DetrDecoderWrapper", # Building part of bigger (tested) model.
"ConditionalDetrEncoder", # Building part of bigger (tested) model.
"ConditionalDetrDecoder", # Building part of bigger (tested) model.
"M2M100Encoder", # Building part of bigger (tested) model.
"M2M100Decoder", # Building part of bigger (tested) model.
"MCTCTEncoder", # Building part of bigger (tested) model.
"MgpstrModel", # Building part of bigger (tested) model.
"Speech2TextEncoder", # Building part of bigger (tested) model.
"Speech2TextDecoder", # Building part of bigger (tested) model.
"LEDEncoder", # Building part of bigger (tested) model.
"LEDDecoder", # Building part of bigger (tested) model.
"BartDecoderWrapper", # Building part of bigger (tested) model.
"BartEncoder", # Building part of bigger (tested) model.
"BertLMHeadModel", # Needs to be setup as decoder.
"BlenderbotSmallEncoder", # Building part of bigger (tested) model.
"BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model.
"BlenderbotEncoder", # Building part of bigger (tested) model.
"BlenderbotDecoderWrapper", # Building part of bigger (tested) model.
"MBartEncoder", # Building part of bigger (tested) model.
"MBartDecoderWrapper", # Building part of bigger (tested) model.
"MegatronBertLMHeadModel", # Building part of bigger (tested) model.
"MegatronBertEncoder", # Building part of bigger (tested) model.
"MegatronBertDecoder", # Building part of bigger (tested) model.
"MegatronBertDecoderWrapper", # Building part of bigger (tested) model.
"MusicgenDecoder", # Building part of bigger (tested) model.
"MvpDecoderWrapper", # Building part of bigger (tested) model.
"MvpEncoder", # Building part of bigger (tested) model.
"PegasusEncoder", # Building part of bigger (tested) model.
"PegasusDecoderWrapper", # Building part of bigger (tested) model.
"PegasusXEncoder", # Building part of bigger (tested) model.
"PegasusXDecoder", # Building part of bigger (tested) model.
"PegasusXDecoderWrapper", # Building part of bigger (tested) model.
"DPREncoder", # Building part of bigger (tested) model.
"ProphetNetDecoderWrapper", # Building part of bigger (tested) model.
"RealmBertModel", # Building part of bigger (tested) model.
"RealmReader", # Not regular model.
"RealmScorer", # Not regular model.
"RealmForOpenQA", # Not regular model.
"ReformerForMaskedLM", # Needs to be setup as decoder.
"Speech2Text2DecoderWrapper", # Building part of bigger (tested) model.
"TFDPREncoder", # Building part of bigger (tested) model.
"TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)
"TFRobertaForMultipleChoice", # TODO: fix
"TFRobertaPreLayerNormForMultipleChoice", # TODO: fix
"TrOCRDecoderWrapper", # Building part of bigger (tested) model.
"TFWhisperEncoder", # Building part of bigger (tested) model.
"TFWhisperDecoder", # Building part of bigger (tested) model.
"SeparableConv1D", # Building part of bigger (tested) model.
"FlaxBartForCausalLM", # Building part of bigger (tested) model.
"FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM.
"OPTDecoderWrapper",
"TFSegformerDecodeHead", # Not a regular model.
"AltRobertaModel", # Building part of bigger (tested) model.
"BlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models
"TFBlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models
"BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model.
"BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model.
"SpeechT5Decoder", # Building part of bigger (tested) model.
"SpeechT5DecoderWithoutPrenet", # Building part of bigger (tested) model.
"SpeechT5DecoderWithSpeechPrenet", # Building part of bigger (tested) model.
"SpeechT5DecoderWithTextPrenet", # Building part of bigger (tested) model.
"SpeechT5Encoder", # Building part of bigger (tested) model.
"SpeechT5EncoderWithoutPrenet", # Building part of bigger (tested) model.
"SpeechT5EncoderWithSpeechPrenet", # Building part of bigger (tested) model.
"SpeechT5EncoderWithTextPrenet", # Building part of bigger (tested) model.
"SpeechT5SpeechDecoder", # Building part of bigger (tested) model.
"SpeechT5SpeechEncoder", # Building part of bigger (tested) model.
"SpeechT5TextDecoder", # Building part of bigger (tested) model.
"SpeechT5TextEncoder", # Building part of bigger (tested) model.
"BarkCausalModel", # Building part of bigger (tested) model.
"BarkModel", # Does not have a forward signature - generation tested with integration tests
]
# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't
# trigger the common tests.
# Paths are relative to `tests/`; membership silences the "should define `all_model_classes`" failure
# in `check_models_are_tested`. (Fixed: the decision_transformer entry was listed twice.)
TEST_FILES_WITH_NO_COMMON_TESTS = [
    "models/decision_transformer/test_modeling_decision_transformer.py",
    "models/camembert/test_modeling_camembert.py",
    "models/mt5/test_modeling_flax_mt5.py",
    "models/mbart/test_modeling_mbart.py",
    "models/mt5/test_modeling_mt5.py",
    "models/pegasus/test_modeling_pegasus.py",
    "models/camembert/test_modeling_tf_camembert.py",
    "models/mt5/test_modeling_tf_mt5.py",
    "models/xlm_roberta/test_modeling_tf_xlm_roberta.py",
    "models/xlm_roberta/test_modeling_flax_xlm_roberta.py",
    "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py",
    "models/xlm_roberta/test_modeling_xlm_roberta.py",
    "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py",
    "models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py",
    "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py",
    "models/bark/test_modeling_bark.py",
]
# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and
# should **not** be the rule.
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
# models to ignore for model xxx mapping
"AlignTextModel",
"AlignVisionModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
"Blip2ForConditionalGeneration",
"Blip2QFormerModel",
"Blip2VisionModel",
"ErnieMForInformationExtraction",
"GitVisionModel",
"GraphormerModel",
"GraphormerForGraphClassification",
"BlipForConditionalGeneration",
"BlipForImageTextRetrieval",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextLMHeadModel",
"BlipTextModel",
"TFBlipForConditionalGeneration",
"TFBlipForImageTextRetrieval",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextLMHeadModel",
"TFBlipTextModel",
"Swin2SRForImageSuperResolution",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerForContrastiveLearning",
"CLIPSegForImageSegmentation",
"CLIPSegVisionModel",
"CLIPSegTextModel",
"EsmForProteinFolding",
"GPTSanJapaneseModel",
"TimeSeriesTransformerForPrediction",
"InformerForPrediction",
"AutoformerForPrediction",
"JukeboxVQVAE",
"JukeboxPrior",
"PegasusXEncoder",
"PegasusXDecoder",
"PegasusXDecoderWrapper",
"PegasusXEncoder",
"PegasusXDecoder",
"PegasusXDecoderWrapper",
"SamModel",
"DPTForDepthEstimation",
"DecisionTransformerGPT2Model",
"GLPNForDepthEstimation",
"ViltForImagesAndTextClassification",
"ViltForImageAndTextRetrieval",
"ViltForTokenClassification",
"ViltForMaskedLM",
"XGLMEncoder",
"XGLMDecoder",
"XGLMDecoderWrapper",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"SegformerDecodeHead",
"TFSegformerDecodeHead",
"FlaxBeitForMaskedImageModeling",
"PLBartEncoder",
"PLBartDecoder",
"PLBartDecoderWrapper",
"BeitForMaskedImageModeling",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
"GroupViTTextModel",
"GroupViTVisionModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
"FlaxCLIPTextModel",
"FlaxCLIPVisionModel",
"FlaxWav2Vec2ForCTC",
"DetrForSegmentation",
"Pix2StructVisionModel",
"Pix2StructTextModel",
"Pix2StructForConditionalGeneration",
"ConditionalDetrForSegmentation",
"DPRReader",
"FlaubertForQuestionAnswering",
"FlavaImageCodebook",
"FlavaTextModel",
"FlavaImageModel",
"FlavaMultimodalModel",
"GPT2DoubleHeadsModel",
"GPTSw3DoubleHeadsModel",
"InstructBlipVisionModel",
"InstructBlipQFormerModel",
"LayoutLMForQuestionAnswering",
"LukeForMaskedLM",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"MgpstrModel",
"OpenAIGPTDoubleHeadsModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
"RagModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"RealmEmbedder",
"RealmForOpenQA",
"RealmScorer",
"RealmReader",
"TFDPRReader",
"TFGPT2DoubleHeadsModel",
"TFLayoutLMForQuestionAnswering",
"TFOpenAIGPTDoubleHeadsModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"Wav2Vec2ForCTC",
"HubertForCTC",
"SEWForCTC",
"SEWDForCTC",
"XLMForQuestionAnswering",
"XLNetForQuestionAnswering",
"SeparableConv1D",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertForQuestionAnswering",
"VisualBertForMultipleChoice",
"TFWav2Vec2ForCTC",
"TFHubertForCTC",
"XCLIPVisionModel",
"XCLIPTextModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
"AltRobertaModel",
"TvltForAudioVisualClassification",
"BarkCausalModel",
"BarkCoarseModel",
"BarkFineModel",
"BarkSemanticModel",
"MusicgenModel",
"MusicgenForConditionalGeneration",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5HifiGan",
]
# DO NOT edit this list!
# (The corresponding pytorch objects should never be in the main `__init__`, but it's too late to remove)
OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK = [
"FlaxBertLayer",
"FlaxBigBirdLayer",
"FlaxRoFormerLayer",
"TFBertLayer",
"TFLxmertEncoder",
"TFLxmertXLayer",
"TFMPNetLayer",
"TFMobileBertLayer",
"TFSegformerLayer",
"TFViTMAELayer",
]
# Update this list for models that have multiple model types for the same
# model doc
MODEL_TYPE_TO_DOC_MAPPING = OrderedDict(
    [
        # model_type -> doc page name: several model types share a single doc page.
        ("data2vec-text", "data2vec"),
        ("data2vec-audio", "data2vec"),
        ("data2vec-vision", "data2vec"),
        ("donut-swin", "donut"),
    ]
)


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
def check_missing_backends():
    """Warn, or raise when running on CI, if one of the three frameworks is missing.

    The full repo consistency checks need PyTorch, TensorFlow and Flax to all be installed:
    on CI (``TRANSFORMERS_IS_CI`` truthy) a missing backend is a hard error, locally it is
    only a warning.
    """
    backends = [
        ("PyTorch", is_torch_available),
        ("TensorFlow", is_tf_available),
        ("Flax", is_flax_available),
    ]
    missing_backends = [name for name, available in backends if not available()]
    if not missing_backends:
        return
    missing = ", ".join(missing_backends)
    if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        raise Exception(
            "Full repo consistency checks require all backends to be installed (with `pip install -e .[dev]` in the "
            f"Transformers repo, the following are missing: {missing}."
        )
    else:
        warnings.warn(
            "Full repo consistency checks require all backends to be installed (with `pip install -e .[dev]` in the "
            f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you "
            "didn't make any change in one of those backends modeling files, you should probably execute the "
            "command above to be on the safe side."
        )
def check_model_list():
    """Check that every model folder under `src/transformers/models/` is declared in `transformers.models`.

    Raises an `Exception` listing the model folders missing from `transformers/models/__init__.py`.
    """
    # Get the models from the directory structure of `src/transformers/models/`
    models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models")
    _models = []
    for model in os.listdir(models_dir):
        if model == "deprecated":
            continue
        model_dir = os.path.join(models_dir, model)
        # Only real model packages (directories with an `__init__.py`) count.
        if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir):
            _models.append(model)

    # Get the models declared as submodules of `transformers.models` (fixed: the previous
    # comment wrongly said this also came from the directory structure).
    models = [model for model in dir(transformers.models) if not model.startswith("__")]

    missing_models = sorted(set(_models).difference(models))
    if missing_models:
        raise Exception(
            f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}."
        )
# If some modeling modules should be ignored for all checks, they should be added in the nested list
# _ignore_modules of this function.
def get_model_modules():
    """Get the model modules inside the transformers library."""
    _ignore_modules = [
        "modeling_auto",
        "modeling_encoder_decoder",
        "modeling_marian",
        "modeling_mmbt",
        "modeling_outputs",
        "modeling_retribert",
        "modeling_utils",
        "modeling_flax_auto",
        "modeling_flax_encoder_decoder",
        "modeling_flax_utils",
        "modeling_speech_encoder_decoder",
        "modeling_flax_speech_encoder_decoder",
        "modeling_flax_vision_encoder_decoder",
        "modeling_timm_backbone",
        "modeling_transfo_xl_utilities",
        "modeling_tf_auto",
        "modeling_tf_encoder_decoder",
        "modeling_tf_outputs",
        "modeling_tf_pytorch_utils",
        "modeling_tf_utils",
        "modeling_tf_transfo_xl_utilities",
        "modeling_tf_vision_encoder_decoder",
        "modeling_vision_encoder_decoder",
    ]
    modules = []
    # Dunder attributes and the `deprecated` folder are not model packages.
    model_names = [m for m in dir(transformers.models) if not m.startswith("__") and m != "deprecated"]
    for model_name in model_names:
        model_package = getattr(transformers.models, model_name)
        for submodule_name in dir(model_package):
            if not submodule_name.startswith("modeling") or submodule_name in _ignore_modules:
                continue
            submodule = getattr(model_package, submodule_name)
            # `modeling*` attributes can also be re-exported classes/functions; keep modules only.
            if inspect.ismodule(submodule):
                modules.append(submodule)
    return modules
def get_models(module, include_pretrained=False):
    """Get the objects in module that are models, as (name, class) pairs."""
    base_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)

    def _is_model(name, obj):
        # Base `XxxPreTrainedModel` classes are skipped unless explicitly requested.
        if not include_pretrained and ("Pretrained" in name or "PreTrained" in name):
            return False
        # Only classes defined in this very module count (not re-exports).
        return isinstance(obj, type) and issubclass(obj, base_classes) and obj.__module__ == module.__name__

    candidates = ((name, getattr(module, name)) for name in dir(module))
    return [(name, obj) for name, obj in candidates if _is_model(name, obj)]
def is_a_private_model(model):
    """Returns True if the model should not be in the main init."""
    if model in PRIVATE_MODELS:
        return True
    # Wrappers, encoders, decoders and prenets are all private building blocks.
    return model.endswith(("Wrapper", "Encoder", "Decoder", "Prenet"))
def check_models_are_in_init():
    """Checks all models defined in the library are in the main init."""
    exported_names = dir(transformers)
    models_not_in_init = []
    for module in get_model_modules():
        for model_name, _ in get_models(module, include_pretrained=True):
            # Private models are allowed to be absent from the main init.
            if model_name not in exported_names and not is_a_private_model(model_name):
                models_not_in_init.append(model_name)
    if len(models_not_in_init) > 0:
        raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.")
# If some test_modeling files should be ignored when checking models are all tested, they should be added in the
# nested list _ignore_files of this function.
def get_model_test_files():
    """Get the model test files.

    The returned files should NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be
    considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files.
    """
    _ignore_files = [
        "test_modeling_common",
        "test_modeling_encoder_decoder",
        "test_modeling_flax_encoder_decoder",
        "test_modeling_flax_speech_encoder_decoder",
        "test_modeling_marian",
        "test_modeling_tf_common",
        "test_modeling_tf_encoder_decoder",
    ]
    # Check both `PATH_TO_TESTS` and `PATH_TO_TESTS/models`
    model_test_root = os.path.join(PATH_TO_TESTS, "models")
    model_test_dirs = [
        os.path.join(model_test_root, entry)
        for entry in os.listdir(model_test_root)
        if os.path.isdir(os.path.join(model_test_root, entry))
    ]
    test_files = []
    for target_dir in [PATH_TO_TESTS] + model_test_dirs:
        for file_or_dir in os.listdir(target_dir):
            path = os.path.join(target_dir, file_or_dir)
            if not os.path.isfile(path):
                continue
            filename = os.path.split(path)[-1]
            if "test_modeling" not in filename or os.path.splitext(filename)[0] in _ignore_files:
                continue
            # Strip the leading `tests` component so paths are relative to `PATH_TO_TESTS`.
            test_files.append(os.path.join(*path.split(os.sep)[1:]))
    return test_files
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class
# for the all_model_classes variable.
def find_tested_models(test_file):
    """Parse the content of test_file to detect what's in all_model_classes"""
    # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
    with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
        content = f.read()
    all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
    # Check with one less parenthesis as well
    all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
    if not all_models:
        # `None` signals "no `all_model_classes` found" to the caller (distinct from an empty list).
        return None
    return [
        stripped
        for entry in all_models
        for stripped in (line.strip() for line in entry.split(","))
        if stripped
    ]
def check_models_are_tested(module, test_file):
    """Check models defined in `module` are tested in `test_file`.

    Returns a list of failure messages (empty when all models are tested), or `None` when `test_file`
    is legitimately exempt via `TEST_FILES_WITH_NO_COMMON_TESTS`.
    """
    # XxxPreTrainedModel are not tested
    defined_models = get_models(module)
    tested_models = find_tested_models(test_file)
    if tested_models is None:
        if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS:
            return
        # Fixed message: added the missing "is" and a separating space between fragments.
        return [
            f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
            + "If this is intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
            + "`utils/check_repo.py`."
        ]
    failures = []
    for model_name, _ in defined_models:
        if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
            # Fixed message: previous fragments concatenated without spaces ("file.If",
            # "`IGNORE_NON_TESTED`in") and dropped "be" in "should not be applied".
            failures.append(
                f"{model_name} is defined in {module.__name__} but is not tested in "
                + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file. "
                + "If common tests should not be applied to that model, add its name to `IGNORE_NON_TESTED` "
                + "in the file `utils/check_repo.py`."
            )
    return failures
def check_all_models_are_tested():
    """Check all models are properly tested."""
    modules = get_model_modules()
    test_files = get_model_test_files()
    failures = []
    for module in modules:
        # A modeling module `xxx` must have exactly one matching `test_xxx.py` file.
        expected_name = f"test_{module.__name__.split('.')[-1]}.py"
        matched_files = [file for file in test_files if expected_name in file]
        if not matched_files:
            failures.append(f"{module.__name__} does not have its corresponding test file {matched_files}.")
        elif len(matched_files) > 1:
            failures.append(f"{module.__name__} has several test files: {matched_files}.")
        else:
            new_failures = check_models_are_tested(module, matched_files[0])
            if new_failures is not None:
                failures += new_failures
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def get_all_auto_configured_models():
    """Return the list of all models in at least one auto class."""
    result = set()  # a set avoids duplicates for models present in several mappings
    # (auto module, mapping-name prefix) pairs, restricted to the installed frameworks.
    sources = []
    if is_torch_available():
        sources.append((transformers.models.auto.modeling_auto, "MODEL_"))
    if is_tf_available():
        sources.append((transformers.models.auto.modeling_tf_auto, "TF_MODEL_"))
    if is_flax_available():
        sources.append((transformers.models.auto.modeling_flax_auto, "FLAX_MODEL_"))
    for auto_module, prefix in sources:
        for attr_name in dir(auto_module):
            if attr_name.startswith(prefix) and attr_name.endswith("MAPPING_NAMES"):
                result |= set(get_values(getattr(auto_module, attr_name)))
    return list(result)
def ignore_unautoclassed(model_name):
    """Rules to determine if `name` should be in an auto class."""
    # Special white list
    if model_name in IGNORE_NON_AUTO_CONFIGURED:
        return True
    # Encoder and Decoder should be ignored
    return "Encoder" in model_name or "Decoder" in model_name
def check_models_are_auto_configured(module, all_auto_models):
    """Check models defined in module are each in an auto class; returns the list of failures."""
    failures = []
    for model_name, _ in get_models(module):
        if model_name in all_auto_models or ignore_unautoclassed(model_name):
            continue
        failures.append(
            f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. "
            "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file "
            "`utils/check_repo.py`."
        )
    return failures
def check_all_models_are_auto_configured():
    """Check all models are each in an auto class."""
    check_missing_backends()
    all_auto_models = get_all_auto_configured_models()
    failures = []
    for module in get_model_modules():
        new_failures = check_models_are_auto_configured(module, all_auto_models)
        if new_failures is not None:
            failures += new_failures
    if failures:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def check_all_auto_object_names_being_defined():
    """Check all names defined in auto (name) mappings exist in the library.

    Collects the hard-coded processing mappings plus every `*_MAPPING_NAMES` of the available auto
    modeling modules, then verifies each referenced class name is an attribute of `transformers`.
    """
    check_missing_backends()
    failures = []
    mappings_to_check = {
        "TOKENIZER_MAPPING_NAMES": TOKENIZER_MAPPING_NAMES,
        "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES,
        "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES,
        "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES,
    }
    # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way.
    for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # all mappings in a single auto modeling file
        mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")]
        mappings_to_check.update({name: getattr(module, name) for name in mapping_names})

    for name, mapping in mappings_to_check.items():
        # The model type (the key) is irrelevant here, so iterate over the values directly.
        for class_names in mapping.values():
            if not isinstance(class_names, tuple):
                class_names = (class_names,)
            for class_name in class_names:
                if class_name is None:
                    continue
                # dummy object is accepted
                if not hasattr(transformers, class_name):
                    # If the class name is in a model name mapping, let's not check if there is a definition in any modeling
                    # module, if it's a private model defined in this file.
                    if name.endswith("MODEL_MAPPING_NAMES") and is_a_private_model(class_name):
                        continue
                    failures.append(
                        f"`{class_name}` appears in the mapping `{name}` but it is not defined in the library."
                    )
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def check_all_auto_mapping_names_in_config_mapping_names():
    """Check all keys defined in auto mappings (mappings of names) appear in `CONFIG_MAPPING_NAMES`."""
    check_missing_backends()
    failures = []
    # `TOKENIZER_PROCESSOR_MAPPING_NAMES` and `AutoTokenizer` is special, and don't need to follow the rule.
    mappings_to_check = {
        "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES,
        "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES,
        "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES,
    }
    # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way.
    for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # all mappings in a single auto modeling file
        mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")]
        mappings_to_check.update({name: getattr(module, name) for name in mapping_names})

    for name, mapping in mappings_to_check.items():
        # Only the keys (model types) matter here, so iterate over them directly
        # (the previous `.items()` loop left `class_names` unused).
        for model_type in mapping:
            if model_type not in CONFIG_MAPPING_NAMES:
                failures.append(
                    f"`{model_type}` appears in the mapping `{name}` but it is not defined in the keys of "
                    "`CONFIG_MAPPING_NAMES`."
                )
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def check_all_auto_mappings_importable():
    """Check all auto mappings could be imported.

    For each `*_MAPPING_NAMES` of the auto modeling modules, the corresponding `*_MAPPING` object
    must be importable from the top-level `transformers` namespace.
    """
    check_missing_backends()
    failures = []
    # Only the mapping *names* are needed; the previous version also stored the mapping values and
    # then discarded them with `for name, _ in ... .items()`. A dict (of keys) keeps insertion order
    # while deduplicating names across modules.
    mapping_names = {}
    for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # all mappings in a single auto modeling file
        mapping_names.update({x: None for x in dir(module) if x.endswith("_MAPPING_NAMES")})

    for name in mapping_names:
        name = name.replace("_MAPPING_NAMES", "_MAPPING")
        if not hasattr(transformers, name):
            failures.append(f"`{name}`")
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def check_objects_being_equally_in_main_init():
    """Check if an object is in the main __init__ if its counterpart in PyTorch is.

    For every attribute exported by `transformers` that comes from a PyTorch `modeling_*` module,
    when a TF/Flax counterpart module file exists on disk and defines a `TF`/`Flax`-prefixed (or
    `TF_`/`Flax_`-prefixed) counterpart object, that object must also be importable from the main
    init, unless listed in `OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK`.
    """
    attrs = dir(transformers)
    failures = []
    for attr in attrs:
        obj = getattr(transformers, attr)
        if hasattr(obj, "__module__"):
            # NOTE(review): assumes `__module__` is a string when present — confirm for exotic C objects.
            module_path = obj.__module__
            # Deprecated models are exempt from having TF/Flax counterparts in the init.
            if "models.deprecated" in module_path:
                continue
            module_name = module_path.split(".")[-1]
            module_dir = ".".join(module_path.split(".")[:-1])
            # Only objects coming from PyTorch modeling files are used as the reference.
            if (
                module_name.startswith("modeling_")
                and not module_name.startswith("modeling_tf_")
                and not module_name.startswith("modeling_flax_")
            ):
                parent_module = sys.modules[module_dir]
                # Only check frameworks that are actually installed.
                frameworks = []
                if is_tf_available():
                    frameworks.append("TF")
                if is_flax_available():
                    frameworks.append("Flax")
                for framework in frameworks:
                    other_module_path = module_path.replace("modeling_", f"modeling_{framework.lower()}_")
                    # Only check when the counterpart modeling file actually exists for this framework.
                    if os.path.isfile("src/" + other_module_path.replace(".", "/") + ".py"):
                        other_module_name = module_name.replace("modeling_", f"modeling_{framework.lower()}_")
                        other_module = getattr(parent_module, other_module_name)
                        # Check the `TFXxx`/`FlaxXxx` spelling of the counterpart name.
                        if hasattr(other_module, f"{framework}{attr}"):
                            if not hasattr(transformers, f"{framework}{attr}"):
                                if f"{framework}{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK:
                                    failures.append(f"{framework}{attr}")
                        # Also check the underscore-separated `TF_XXX`/`Flax_XXX` spelling.
                        if hasattr(other_module, f"{framework}_{attr}"):
                            if not hasattr(transformers, f"{framework}_{attr}"):
                                if f"{framework}_{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK:
                                    failures.append(f"{framework}_{attr}")
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
# Matches a line containing nothing but a single decorator, capturing the decorator text.
_re_decorator = re.compile(r"^\s*@(\S+)\s+$")


def check_decorator_order(filename):
    """Check that in the test file `filename` the slow decorator is always last.

    Returns the 0-based indices of lines where a `parameterized` decorator appears below
    another decorator in the same stack.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    errors = []
    previous_decorator = None
    for index, line in enumerate(lines):
        match = _re_decorator.search(line)
        if match is None:
            # A non-decorator line ends the current decorator stack.
            previous_decorator = None
            continue
        decorator_name = match.group(1)
        # `parameterized` decorators must come first in a decorator stack.
        if previous_decorator is not None and decorator_name.startswith("parameterized"):
            errors.append(index)
        previous_decorator = decorator_name
    return errors
def check_all_decorator_order():
    """Check that in all test files, the slow decorator is always last.

    Raises a `ValueError` listing each offending file and (0-based) line index.
    """
    errors = []
    for fname in os.listdir(PATH_TO_TESTS):
        if fname.endswith(".py"):
            filename = os.path.join(PATH_TO_TESTS, fname)
            new_errors = check_decorator_order(filename)
            # Report the actual offending file (previously a literal "(unknown)" placeholder
            # was printed, making the failure impossible to locate).
            errors += [f"- {filename}, line {i}" for i in new_errors]
    if len(errors) > 0:
        msg = "\n".join(errors)
        raise ValueError(
            "The parameterized decorator (and its variants) should always be first, but this is not the case in the"
            f" following files:\n{msg}"
        )
def find_all_documented_objects():
    """Parse the content of all doc files to detect which classes and functions they document."""
    documented_obj = []
    # rst docs use `autoclass`/`autofunction` directives; md docs use `[[autodoc]]` markers.
    patterns = {
        "**/*.rst": r"(?:autoclass|autofunction):: transformers.(\S+)\s+",
        "**/*.md": r"\[\[autodoc\]\]\s+(\S+)\s+",
    }
    for glob_pattern, doc_regex in patterns.items():
        for doc_file in Path(PATH_TO_DOC).glob(glob_pattern):
            with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
                content = f.read()
            # Keep only the last dotted component (the object name itself).
            documented_obj.extend(obj.split(".")[-1] for obj in re.findall(doc_regex, content))
    return documented_obj
# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.
DEPRECATED_OBJECTS = [
"AutoModelWithLMHead",
"BartPretrainedModel",
"DataCollator",
"DataCollatorForSOP",
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"PretrainedBartModel",
"PretrainedFSMTModel",
"SingleSentenceClassificationProcessor",
"SquadDataTrainingArguments",
"SquadDataset",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"TFAutoModelWithLMHead",
"TFBartPretrainedModel",
"TextDataset",
"TextDatasetForNextSentencePrediction",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2Tokenizer",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
"TFTrainer",
"TFTrainingArguments",
]
# Exceptionally, some objects should not be documented after all rules passed.
# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!
UNDOCUMENTED_OBJECTS = [
"AddedToken", # This is a tokenizers class.
"BasicTokenizer", # Internal, should never have been in the main init.
"CharacterTokenizer", # Internal, should never have been in the main init.
"DPRPretrainedReader", # Like an Encoder.
"DummyObject", # Just picked by mistake sometimes.
"MecabTokenizer", # Internal, should never have been in the main init.
"ModelCard", # Internal type.
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer)
"TFDPRPretrainedReader", # Like an Encoder.
"TransfoXLCorpus", # Internal type.
"WordpieceTokenizer", # Internal, should never have been in the main init.
"absl", # External module
"add_end_docstrings", # Internal, should never have been in the main init.
"add_start_docstrings", # Internal, should never have been in the main init.
"convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights
"logger", # Internal logger
"logging", # External module
"requires_backends", # Internal function
"AltRobertaModel", # Internal module
"FalconConfig", # TODO Matt Remove this and re-add the docs once TGI is ready
"FalconForCausalLM",
"FalconForQuestionAnswering",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconModel",
]
# This list should be empty. Objects in it should get their own doc page.
SHOULD_HAVE_THEIR_OWN_PAGE = [
# Benchmarks
"PyTorchBenchmark",
"PyTorchBenchmarkArguments",
"TensorFlowBenchmark",
"TensorFlowBenchmarkArguments",
"AutoBackbone",
"BitBackbone",
"ConvNextBackbone",
"ConvNextV2Backbone",
"DinatBackbone",
"FocalNetBackbone",
"MaskFormerSwinBackbone",
"MaskFormerSwinConfig",
"MaskFormerSwinModel",
"NatBackbone",
"ResNetBackbone",
"SwinBackbone",
"TimmBackbone",
"TimmBackboneConfig",
]
def ignore_undocumented(name):
    """Rules to determine if `name` should be undocumented."""
    # NOT DOCUMENTED ON PURPOSE.
    # Uppercase constants are not documented.
    if name.isupper():
        return True
    # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.
    internal_suffixes = ("PreTrainedModel", "Decoder", "Encoder", "Layer", "Embeddings", "Attention")
    if name.endswith(internal_suffixes):
        return True
    # Submodules (directories or single-file modules) are not documented.
    submodule_path = os.path.join(PATH_TO_TRANSFORMERS, name)
    if os.path.isdir(submodule_path) or os.path.isfile(f"{submodule_path}.py"):
        return True
    # All load functions are not documented.
    if name.startswith(("load_tf", "load_pytorch")):
        return True
    # is_xxx_available functions are not documented.
    if name.startswith("is_") and name.endswith("_available"):
        return True
    # Deprecated objects are not documented.
    if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
        return True
    # MMBT model does not really work.
    if name.startswith("MMBT"):
        return True
    return name in SHOULD_HAVE_THEIR_OWN_PAGE
def check_all_objects_are_documented():
    """Check that every object exposed in the public `transformers` init is documented in the doc files."""
    documented_objs = find_all_documented_objects()
    # NOTE(review): `transformers._modules` presumably lists the submodule names exposed on the
    # package, so `objects` is everything public that is not a submodule — TODO confirm.
    modules = transformers._modules
    objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")]
    undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)]
    if len(undocumented_objs) > 0:
        raise Exception(
            "The following objects are in the public init so should be documented:\n - "
            + "\n - ".join(undocumented_objs)
        )
    # Additional documentation checks piggy-backing on this one.
    check_docstrings_are_in_md()
    check_model_type_doc_match()
def check_model_type_doc_match():
    """Check all doc pages in `docs/.../model_doc` have a corresponding model type."""
    model_doc_folder = Path(PATH_TO_DOC) / "model_doc"
    model_docs = [m.stem for m in model_doc_folder.glob("*.md")]
    model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys())
    # Some model types use a different doc page name, remapped via `MODEL_TYPE_TO_DOC_MAPPING`.
    model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types]
    errors = []
    for m in model_docs:
        # `auto.md` documents the auto classes and intentionally has no model type.
        if m not in model_types and m != "auto":
            close_matches = get_close_matches(m, model_types)
            error_message = f"{m} is not a proper model identifier."
            if len(close_matches) > 0:
                close_matches = "/".join(close_matches)
                error_message += f" Did you mean {close_matches}?"
            errors.append(error_message)
    if len(errors) > 0:
        raise ValueError(
            "Some model doc pages do not match any existing model type:\n"
            + "\n".join(errors)
            + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in "
            "models/auto/configuration_auto.py."
        )
# Patterns that only appear in rst (not Markdown) docstrings:
# sphinx roles like :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`.
_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`")
# Literals between double backquotes (Markdown uses single backquotes).
_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)")
# rst example introductions ending with `::`.
_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE)


def is_rst_docstring(docstring):
    """Returns `True` if `docstring` is written in rst."""
    rst_markers = (_re_rst_special_words, _re_double_backquotes, _re_rst_example)
    return any(marker.search(docstring) is not None for marker in rst_markers)
def check_docstrings_are_in_md():
    """Check all docstrings are in md (not rst)."""
    files_with_rst = []
    for file in Path(PATH_TO_TRANSFORMERS).glob("**/*.py"):
        with open(file, encoding="utf-8") as f:
            code = f.read()
        # Splitting on `"""` puts docstring contents at odd indices and regular code at even ones.
        docstrings = code.split('"""')
        for idx, docstring in enumerate(docstrings):
            if idx % 2 == 0 or not is_rst_docstring(docstring):
                continue
            # One rst docstring is enough to flag the file; move on to the next one.
            files_with_rst.append(file)
            break
    if len(files_with_rst) > 0:
        raise ValueError(
            "The following files have docstrings written in rst:\n"
            + "\n".join([f"- {f}" for f in files_with_rst])
            + "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n"
            "(`pip install git+https://github.com/huggingface/doc-builder`)"
        )
def check_deprecated_constant_is_up_to_date():
    """Check that the `DEPRECATED_MODELS` constant stays in sync with the `models/deprecated` folder.

    Raises:
        Exception: if a deprecated model folder is missing from the constant, or the constant lists
            a model that is not in the deprecated folder.
    """
    deprecated_folder = os.path.join(PATH_TO_TRANSFORMERS, "models", "deprecated")
    deprecated_models = [m for m in os.listdir(deprecated_folder) if not m.startswith("_")]

    constant_to_check = transformers.models.auto.configuration_auto.DEPRECATED_MODELS
    message = []
    missing_models = sorted(set(deprecated_models) - set(constant_to_check))
    if len(missing_models) != 0:
        missing_models = ", ".join(missing_models)
        message.append(
            # Fixed typo in the user-facing message: "make sur" -> "make sure".
            "The following models are in the deprecated folder, make sure to add them to `DEPRECATED_MODELS` in "
            f"`models/auto/configuration_auto.py`: {missing_models}."
        )

    extra_models = sorted(set(constant_to_check) - set(deprecated_models))
    if len(extra_models) != 0:
        extra_models = ", ".join(extra_models)
        message.append(
            "The following models are in the `DEPRECATED_MODELS` constant but not in the deprecated folder. Either "
            f"remove them from the constant or move to the deprecated folder: {extra_models}."
        )

    if len(message) > 0:
        raise Exception("\n".join(message))
def check_repo_quality():
    """Check all models are properly tested and documented."""
    # (progress message, checks to run) pairs, executed strictly in this order.
    steps = [
        ("Checking all models are included.", [check_model_list]),
        ("Checking all models are public.", [check_models_are_in_init]),
        ("Checking all models are properly tested.", [check_all_decorator_order, check_all_models_are_tested]),
        ("Checking all objects are properly documented.", [check_all_objects_are_documented]),
        ("Checking all models are in at least one auto class.", [check_all_models_are_auto_configured]),
        ("Checking all names in auto name mappings are defined.", [check_all_auto_object_names_being_defined]),
        (
            "Checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.",
            [check_all_auto_mapping_names_in_config_mapping_names],
        ),
        ("Checking all auto mappings could be imported.", [check_all_auto_mappings_importable]),
        (
            "Checking all objects are equally (across frameworks) in the main __init__.",
            [check_objects_being_equally_in_main_init],
        ),
        ("Checking the DEPRECATED_MODELS constant is up to date.", [check_deprecated_constant_is_up_to_date]),
    ]
    for message, checks in steps:
        print(message)
        for check in checks:
            check()
if __name__ == "__main__":
    # Script entry point: run every repository consistency check.
    check_repo_quality()
| 48,398 | 41.492537 | 128 | py |
transformers | transformers-main/utils/check_build.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path):
    """Return whether every custom extension file of `FILES_TO_FIND` exists under `transformers_path`."""
    return all((transformers_path / custom_file).exists() for custom_file in FILES_TO_FIND)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        # Check the installed `transformers` package (wherever it was imported from).
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        # Check the local build artifacts under `build/lib/transformers`.
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 1,807 | 35.897959 | 117 | py |
transformers | transformers-main/utils/get_ci_error_statistics.py | import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    payload = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in payload["jobs"]})
        # The first request returned up to 100 jobs; page through the rest (page numbers start at 2).
        remaining_pages = math.ceil((payload["total_count"] - 100) / 100)
        for page in range(remaining_pages):
            payload = requests.get(url + f"&page={page + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in payload["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def get_artifacts_links(worflow_run_id, token=None):
    """Get all artifact links from a workflow run.

    Returns a dict mapping artifact names to their `archive_download_url`.
    NOTE(review): the parameter name carries a typo (`worflow_run_id`); renaming it would break
    keyword-argument callers, so it is kept as-is.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        # The first request returned up to 100 artifacts; page through the rest (page numbers start at 2).
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL.

    The URL is of the form `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`,
    but it can't be used to download directly. We need to get a redirect URL first.
    See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact

    The artifact is written to `{output_dir}/{artifact_name}.zip`.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # Do not follow the redirect automatically: we only want the `Location` header.
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format).

    Returns a list of `[error_line, error, failed_test, job_link]` entries: `error_line` is the
    location at which `error` occurred (parsed from `failures_line.txt`), `failed_test` the failing
    test method (from `summary_short.txt`), and `job_link` the URL looked up in `job_links` via the
    artifact's `job_name.txt` (or `None`).
    """
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    # The two report files must be in sync: entry i of each refers to the same failure.
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from every `*.zip` artifact file found in `artifact_dir`."""
    errors = []
    for entry in os.listdir(artifact_dir):
        if entry.endswith(".zip"):
            archive_path = os.path.join(artifact_dir, entry)
            errors.extend(get_errors_from_single_artifact(archive_path, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Group log entries by error, most frequent first.

    Each entry of `logs` is `[error_line, error, failed_test, job_link]`. Returns a dict mapping
    each (non-filtered) error to its occurrence count and the `(failed_test, error_line)` pairs
    where it appeared, ordered by decreasing count.
    """
    counts = Counter(entry[1] for entry in logs).most_common()
    grouped = {}
    for error, count in counts:
        if error_filter is not None and error in error_filter:
            continue
        failed = [(entry[2], entry[0]) for entry in logs if entry[1] == error]
        grouped[error] = {"count": count, "failed_tests": failed}
    return dict(sorted(grouped.items(), key=lambda item: item[1]["count"], reverse=True))
def get_model(test):
    """Extract the model folder name (e.g. `bert`) from a test method path, or `None` for non-model tests."""
    test_path = test.split("::")[0]
    if not test_path.startswith("tests/models/"):
        return None
    return test_path.split("/")[2]
def reduce_by_model(logs, error_filter=None):
    """Count errors per model, most affected model first.

    Entries whose test does not belong to a model (per `get_model`) are dropped. Returns a dict
    mapping each model to its total error count and per-error counts (filtered by `error_filter`).
    """
    # Replace the test path by its model name and drop non-model entries.
    model_logs = [
        (error_line, error, model)
        for error_line, error, model in ((x[0], x[1], get_model(x[2])) for x in logs)
        if model is not None
    ]
    results = {}
    for model in {entry[2] for entry in model_logs}:
        counter = Counter(entry[1] for entry in model_logs if entry[2] == model)
        error_counts = {
            error: count
            for error, count in counter.most_common()
            if error_filter is None or error not in error_filter
        }
        total = sum(error_counts.values())
        if total > 0:
            results[model] = {"count": total, "errors": error_counts}
    return dict(sorted(results.items(), key=lambda item: item[1]["count"], reverse=True))
def make_github_table(reduced_by_error):
    """Render the output of `reduce_by_error` as a GitHub-flavored markdown table."""
    rows = ["| no. | error | status |", "|-:|:-|:-|"]
    for error, info in reduced_by_error.items():
        # Truncate very long error messages to keep the table readable.
        rows.append(f"| {info['count']} | {error[:100]} | |")
    return "\n".join(rows)
def make_github_table_per_model(reduced_by_model):
    """Render the output of `reduce_by_model` as a GitHub-flavored markdown table."""
    rows = ["| model | no. of errors | major error | count |", "|-:|-:|-:|-:|"]
    for model, info in reduced_by_model.items():
        # `errors` is ordered by decreasing count, so the first entry is the major error.
        top_error, top_count = next(iter(info["errors"].items()))
        rows.append(f"| {model} | {info['count']} | {top_error[:60]} | {top_count} |")
    return "\n".join(rows)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    # Persist intermediate results for debugging / later inspection.
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    # Aggregate and write the two markdown summary tables.
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 10,403 | 36.42446 | 120 | py |
transformers | transformers-main/utils/check_config_attributes.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`

    Args:
        config_class (`type`):
            The configuration class for which the arguments in its `__init__` will be checked.
        attributes (`List[str]`):
            The name of an argument (or attribute) and its variant names if any.
        default_value (`Any`):
            A default value for the attribute in `attributes` assigned in the `__init__` of `config_class`.
        source_strings (`List[str]`):
            The python source code strings in the same modeling directory where `config_class` is defined. The file
            containing the definition of `config_class` should be excluded.

    Returns:
        `bool`: `True` if the attribute is used, or if it falls under one of the allowed special cases.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                # `SPECIAL_CASES_TO_ALLOW` maps a class name to `True` (allow everything) or a list of attributes.
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check the arguments in `__init__` of `config_class` are used in the modeling files in the same directory

    Args:
        config_class (`type`):
            The configuration class for which the arguments in its `__init__` will be checked.

    Returns:
        `List[str]`: the sorted list of `__init__` argument names that appear unused in the modeling files.
    """
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Check the arguments in `__init__` of all configuration classes are used in python files

    Raises:
        ValueError: if any configuration class has `__init__` arguments that are never used.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
    # Script entry point: validate that every config `__init__` argument is actually used.
    check_config_attributes()
| 12,599 | 42.298969 | 118 | py |
transformers | transformers-main/utils/update_metadata.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# The PT regex also matches any TF or Flax model name, so it must only be tried
# in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    # Fixed: was "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES" (doubled "FOR"). That name
    # does not exist in models/auto/modeling_auto.py, so the `hasattr` guard in
    # `update_pipeline_and_auto_class_table` silently skipped the image-to-text tag.
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
# Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its component words."""
    pattern = ".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"
    words = []
    for match in re.finditer(pattern, identifier):
        words.append(match.group(0))
    return words
def get_frameworks_table():
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
    modules.

    Returns a `pd.DataFrame` with columns: model_type, pytorch, tensorflow, flax (bools)
    and processor (the preferred Auto* preprocessing class name).
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    # e.g. "BertConfig" -> prefix "Bert" -> model_type "bert".
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    # NOTE: the elif order matters — the PT regex would also match TF/Flax names,
    # so it must stay in the last branch.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            # Strip trailing camel-case words until we hit a known model prefix
            # (handles names like "BertForSequenceClassification" -> "Bert").
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    # each model type gets its preferred preprocessing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """
    Update the model-class -> (pipeline_tag, auto_class) table without removing keys
    that no longer exist in the library, so old metadata is never dropped.
    """
    framework_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        # NOTE(review): the TF/Flax auto-class names are stored with "TF_"/"Flax_"
        # prefixes exactly as built here — confirm downstream consumers expect this.
        names_per_framework = zip(
            framework_modules,
            [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"],
            [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"],
        )
        for module, cls, mapping_name in names_per_framework:
            # This pipeline type may not exist in this framework.
            if not hasattr(module, mapping_name):
                continue
            # Mapping values are either one class name or an iterable of names.
            model_names = []
            for entry in getattr(module, mapping_name).values():
                if isinstance(entry, str):
                    model_names.append(entry)
                else:
                    model_names.extend(list(entry))
            for model_name in model_names:
                table[model_name] = (pipeline_tag, cls)
    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.

    Regenerates `frameworks.json` and `pipeline_tags.json` and pushes them to the
    `huggingface/transformers-metadata` dataset repo on the Hub.

    Args:
        token: Hub auth token with write access to the metadata dataset.
        commit_sha: sha of the transformers commit triggering the update (used in
            the commit message), or None.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    # Start from the existing pipeline tags so stale entries are preserved.
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    # Serialize both files to a temp dir, then upload the folder in one commit.
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Ensure every supported pipeline task is covered by `PIPELINE_TAGS_AND_AUTO_MODELS`."""
    known = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    supported_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for task in supported_tasks:
        if task in known:
            continue
        default_model = supported_tasks[task]["pt"]
        if isinstance(default_model, (list, tuple)):
            default_model = default_model[0]
        # A task also counts as covered when its default auto class is listed
        # under another tag.
        if default_model.__name__ not in known.values():
            missing.append(task)
    if missing:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    # CLI: either verify pipeline-tag coverage (--check-only) or push fresh metadata.
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 12,459 | 42.114187 | 118 | py |
transformers | transformers-main/utils/check_dummies.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_re_test_backend = re.compile(r"^\s+if\s+not\s+\(?is\_[a-z_]*\_available\(\)")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Return the sorted, `_and_`-joined backend names guarded by `line`, or None.

    `line` is one line of the main `__init__`; it only counts when it looks like
    an `if not is_xxx_available():` guard.
    """
    if _re_test_backend.search(line) is None:
        return None
    found = sorted(match[0] for match in _re_backend.findall(line))
    return "_and_".join(found)
def read_init():
    """Read the init and extracts PyTorch, TensorFlow, SentencePiece and Tokenizers objects.

    Returns a dict mapping backend name (e.g. "torch", "tf_and_tokenizers") to the
    list of object names imported under that backend's guard in the TYPE_CHECKING
    section of the main `__init__.py`.
    """
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        line_index += 1
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        if backend is not None:
            # Skip the dummy-import branch; the real imports live in the `else:`.
            while not lines[line_index].startswith(" else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            # (8 spaces = inside the else branch; blank lines are tolerated).
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    # Continuation line of a multi-line import: strip indent and ",\n".
                    objects.append(line[12:-2])
                line_index += 1
            backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Render the dummy source snippet for `name`, guarded by `backend_name`.

    ALL-UPPERCASE names are treated as constants, all-lowercase names as
    functions, and everything else (CamelCase) as classes.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    if name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Build the full text of each dummy module, keyed by backend name."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        # e.g. "torch_and_vision" -> '["torch", "vision"]'
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        header = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        header += "from ..utils import DummyObject, requires_backends\n\n"
        body = "\n".join(create_dummy_object(obj, backend_name) for obj in objects)
        dummy_files[backend] = header + body
    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content.

    Compares the generated dummy-module text against the files on disk; raises a
    `ValueError` on mismatch unless `overwrite` is True, in which case the files
    are rewritten in place.
    """
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_TRANSFORMERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            # Missing file counts as empty so a mismatch is reported below.
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    # CLI: verify the dummy objects, optionally rewriting them in place.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
| 6,221 | 35.816568 | 118 | py |
transformers | transformers-main/utils/check_table.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
# Docs tree holding the index.md whose model table is checked/updated.
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
"""
Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
lines.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Find the start prompt.
start_index = 0
while not lines[start_index].startswith(start_prompt):
start_index += 1
start_index += 1
end_index = start_index
while not lines[end_index].startswith(end_prompt):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# The PT regex also matches any TF or Flax model name, so it must only be tried
# in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words (e.g. "TFBert" -> ["TF", "Bert"])."""
    word_boundary = ".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"
    return [match.group(0) for match in re.finditer(word_boundary, identifier)]
def _center_text(text, width):
text_length = 2 if text == "✅" or text == "❌" else len(text)
left_indent = (width - text_length) // 2
right_indent = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules.

    Returns the markdown table (as a single string) listing, for every model type,
    whether it has a slow/fast tokenizer and PT/TF/Flax support.
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    # e.g. "BertConfig" -> prefix "Bert".
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    # NOTE: the elif order matters — the PT regex would also match TF/Flax names,
    # so it must stay in the last branch.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            # Strip trailing camel-case words until a known model prefix is found.
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check that the model table in `index.md` matches the library state; rewrite it when `overwrite` is True."""
    index_path = os.path.join(PATH_TO_DOCS, "index.md")
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=index_path,
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table == new_table:
        return
    if not overwrite:
        raise ValueError(
            "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
        )
    # Splice the regenerated table back between the two markers.
    with open(index_path, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
if __name__ == "__main__":
    # CLI: verify the docs model table, optionally rewriting it in place.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 7,490 | 40.159341 | 116 | py |
transformers | transformers-main/utils/test_module/custom_image_processing.py | from transformers import CLIPImageProcessor
class CustomImageProcessor(CLIPImageProcessor):
    """Test-only image processor: a plain subclass of `CLIPImageProcessor` with no changes."""
| 103 | 16.333333 | 47 | py |
transformers | transformers-main/utils/test_module/custom_tokenization_fast.py | from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Test-only fast tokenizer paired with `CustomTokenizer` as its slow counterpart."""

    slow_tokenizer_class = CustomTokenizer
| 193 | 20.555556 | 48 | py |
transformers | transformers-main/utils/test_module/custom_pipeline.py | import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis of `outputs`."""
    # Subtracting the per-row max keeps the exponentials from overflowing.
    stabilized = outputs - np.max(outputs, axis=-1, keepdims=True)
    exps = np.exp(stabilized)
    return exps / np.sum(exps, axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    """Toy pipeline classifying a pair of texts; used to exercise custom pipeline registration."""

    def _sanitize_parameters(self, **kwargs):
        # Only `second_text` is routed (to `preprocess`); no forward/postprocess kwargs.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        # Tokenize the pair with tensors in the framework the pipeline was built for.
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        raw_logits = model_outputs.logits[0].numpy()
        probs = softmax(raw_logits)
        top = np.argmax(probs)
        return {
            "label": self.model.config.id2label[top],
            "score": probs[top].item(),
            "logits": raw_logits.tolist(),
        }
| 1,110 | 31.676471 | 89 | py |
transformers | transformers-main/utils/test_module/custom_tokenization.py | from transformers import BertTokenizer
class CustomTokenizer(BertTokenizer):
    """Test-only slow tokenizer: a plain subclass of `BertTokenizer` with no changes."""
| 88 | 13.833333 | 38 | py |
transformers | transformers-main/utils/test_module/__init__.py | 0 | 0 | 0 | py | |
transformers | transformers-main/utils/test_module/custom_processing.py | from transformers import ProcessorMixin
class CustomProcessor(ProcessorMixin):
    """Test-only processor pairing an auto feature extractor with an auto tokenizer."""

    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
| 172 | 23.714286 | 52 | py |
transformers | transformers-main/utils/test_module/custom_feature_extraction.py | from transformers import Wav2Vec2FeatureExtractor
class CustomFeatureExtractor(Wav2Vec2FeatureExtractor):
    """Test-only feature extractor: a plain subclass of `Wav2Vec2FeatureExtractor` with no changes."""
| 117 | 18.666667 | 55 | py |
transformers | transformers-main/utils/test_module/custom_modeling.py | import torch
from transformers import PreTrainedModel
from .custom_configuration import CustomConfig, NoSuperInitConfig
class CustomModel(PreTrainedModel):
    """Minimal test model: a single square linear layer on top of `CustomConfig`."""

    config_class = CustomConfig

    def __init__(self, config):
        super().__init__(config)
        size = config.hidden_size
        self.linear = torch.nn.Linear(size, size)

    def forward(self, x):
        return self.linear(x)

    def _init_weights(self, module):
        # No custom init: default `nn.Linear` initialization is fine for tests.
        pass
class NoSuperInitModel(PreTrainedModel):
    """Companion test model whose config class (`NoSuperInitConfig`) skips `super().__init__`."""

    config_class = NoSuperInitConfig

    def __init__(self, config):
        super().__init__(config)
        dim = config.attribute
        self.linear = torch.nn.Linear(dim, dim)

    def forward(self, x):
        return self.linear(x)

    def _init_weights(self, module):
        # No custom init: default `nn.Linear` initialization is fine for tests.
        pass
| 772 | 21.735294 | 77 | py |
transformers | transformers-main/utils/test_module/custom_configuration.py | from transformers import PretrainedConfig
class CustomConfig(PretrainedConfig):
    """Test-only config exposing a single extra `attribute` kwarg (default 1)."""

    model_type = "custom"

    def __init__(self, attribute=1, **kwargs):
        # Store our field first, then let the base class consume the rest.
        self.attribute = attribute
        super().__init__(**kwargs)
class NoSuperInitConfig(PretrainedConfig):
    # NOTE: this config deliberately does NOT call `super().__init__` — presumably it
    # exists to test how the library copes with misbehaving user configs; do not "fix" it.
    model_type = "custom"
    def __init__(self, attribute=1, **kwargs):
        self.attribute = attribute
| 380 | 21.411765 | 46 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/main.py | import argparse
import os
import os.path as osp
import torch
from torch import optim
from torchvision import transforms
import trainer
from datasets import get_dataloader
from models.loss import DetectionCriterion
from models.model import DetectionModel
def arguments():
    """Parse the command-line arguments for training."""
    ap = argparse.ArgumentParser()
    # Dataset locations.
    ap.add_argument("traindata")
    ap.add_argument("valdata")
    ap.add_argument("--dataset-root", default="")
    ap.add_argument("--dataset", default="WIDERFace")
    # Optimization hyper-parameters.
    ap.add_argument("--lr", default=1e-4, type=float)
    ap.add_argument("--weight-decay", default=0.0005, type=float)
    ap.add_argument("--momentum", default=0.9, type=float)
    ap.add_argument("--batch_size", default=12, type=int)
    ap.add_argument("--workers", default=8, type=int)
    # Training schedule / checkpointing.
    ap.add_argument("--start-epoch", default=0, type=int)
    ap.add_argument("--epochs", default=50, type=int)
    ap.add_argument("--save-every", default=10, type=int)
    ap.add_argument("--resume", default="")
    ap.add_argument("--debug", action="store_true")
    return ap.parse_args()
def main():
    """Train the tiny-faces detector: build data/model/optimizer, then run the epoch loop."""
    args = arguments()
    num_templates = 25 # aka the number of clusters
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    img_transforms = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    train_loader, _ = get_dataloader(args.traindata, args, num_templates,
                                     img_transforms=img_transforms)
    model = DetectionModel(num_objects=1, num_templates=num_templates)
    loss_fn = DetectionCriterion(num_templates)
    # directory where we'll store model weights
    weights_dir = "weights"
    if not osp.exists(weights_dir):
        os.mkdir(weights_dir)
    # check for CUDA
    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    optimizer = optim.SGD(model.learnable_parameters(args.lr), lr=args.lr,
                          momentum=args.momentum, weight_decay=args.weight_decay)
    # optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # Set the start epoch if it has not been
        if not args.start_epoch:
            args.start_epoch = checkpoint['epoch']
    # NOTE(review): StepLR with last_epoch != -1 requires 'initial_lr' in the optimizer
    # param groups — verify the resume path doesn't crash here when start_epoch > 0.
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=20,
                                          last_epoch=args.start_epoch-1)
    # train and evaluate for `epochs`
    for epoch in range(args.start_epoch, args.epochs):
        trainer.train(model, loss_fn, optimizer, train_loader, epoch, device=device)
        scheduler.step()
        # Periodically snapshot model + optimizer state.
        if (epoch+1) % args.save_every == 0:
            trainer.save_checkpoint({
                'epoch': epoch + 1,
                'batch_size': train_loader.batch_size,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, filename="checkpoint_{0}.pth".format(epoch+1), save_path=weights_dir)
if __name__ == '__main__':
    # Script entry point.
    main()
| 3,338 | 33.78125 | 92 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/evaluate.py | import argparse
import json
import os
import os.path as osp
import numpy as np
import torch
from PIL import Image
from torch.utils import data
from torchvision import transforms
from tqdm import tqdm
import trainer
from datasets import get_dataloader
from datasets.wider_face import WIDERFace
from models.model import DetectionModel
from utils import visualize
def arguments():
    """Parse the command-line arguments for evaluation."""
    ap = argparse.ArgumentParser("Model Evaluator")
    ap.add_argument("dataset")
    ap.add_argument("--split", default="val")
    ap.add_argument("--dataset-root")
    ap.add_argument("--checkpoint",
                    help="The path to the model checkpoint", default="")
    # Detection thresholds.
    ap.add_argument("--prob_thresh", type=float, default=0.03)
    ap.add_argument("--nms_thresh", type=float, default=0.3)
    # Loader settings and output location.
    ap.add_argument("--workers", default=8, type=int)
    ap.add_argument("--batch_size", default=1, type=int)
    ap.add_argument("--results_dir", default=None)
    ap.add_argument("--debug", action="store_true")
    return ap.parse_args()
def dataloader(args):
    """Build the evaluation data loader (ImageNet-normalized tensors) and its templates."""
    # Standard ImageNet channel statistics.
    val_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return get_dataloader(
        args.dataset, args, train=False, split=args.split, img_transforms=val_transforms
    )
def get_model(checkpoint=None, num_templates=25):
    """Create a `DetectionModel`, optionally restoring weights from `checkpoint`."""
    model = DetectionModel(num_templates=num_templates)
    if checkpoint:
        state = torch.load(checkpoint)
        model.load_state_dict(state["model"])
    return model
def write_results(dets, img_path, split, results_dir=None):
    """Write the detections for one image in the WIDER FACE submission format.

    Args:
        dets: array of shape (N, 5+) with rows [x1, y1, x2, y2, score, ...].
        img_path: image path relative to the dataset root (e.g. "0--Parade/xxx.jpg").
        split: dataset split name, used for the default results directory.
        results_dir: output root; defaults to "<split>_results".

    The output file mirrors `img_path` with a .txt extension and contains the image
    file name, the number of detections, then one "left top width height score"
    line per detection (coordinates rounded; width/height include both endpoints).
    """
    results_dir = results_dir or "{0}_results".format(split)

    # Replace only the extension. The previous `img_path.replace('jpg', 'txt')`
    # corrupted any path containing "jpg" elsewhere (e.g. a folder named "jpgs").
    filename = osp.join(results_dir, osp.splitext(img_path)[0] + ".txt")
    # exist_ok makes directory creation race-free and covers results_dir too.
    os.makedirs(osp.dirname(filename), exist_ok=True)

    with open(filename, 'w') as f:
        f.write(img_path.split('/')[-1] + "\n")
        f.write(str(dets.shape[0]) + "\n")
        for x in dets:
            left, top = np.round(x[0]), np.round(x[1])
            # +1 converts inclusive corner coordinates into width/height.
            width = np.round(x[2]-x[0]+1)
            height = np.round(x[3]-x[1]+1)
            score = x[4]
            d = "{0} {1} {2} {3} {4}\n".format(int(left), int(top),
                                               int(width), int(height), score)
            f.write(d)
def run(model, val_loader, templates, prob_thresh, nms_thresh, device, split,
        results_dir=None, debug=False):
    """Run the detector over the whole loader, writing one results file per image.

    NOTE(review): `debug` is currently unused, and the function returns only the
    detections of the LAST image (and would raise NameError on an empty loader) —
    callers appear to ignore the return value.
    """
    for idx, (img, filename) in tqdm(enumerate(val_loader), total=len(val_loader)):
        dets = trainer.get_detections(model, img, templates, val_loader.dataset.rf,
                                      val_loader.dataset.transforms, prob_thresh,
                                      nms_thresh, device=device)
        # batch_size is 1 for evaluation, hence filename[0].
        write_results(dets, filename[0], split, results_dir)
    return dets
def main():
    """Evaluate a checkpoint: build the loader/model and write detection result files."""
    args = arguments()
    # Prefer GPU when available.
    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    val_loader, templates = dataloader(args)
    num_templates = templates.shape[0]
    model = get_model(args.checkpoint, num_templates=num_templates)
    with torch.no_grad():
        # run model on val/test set and generate results files
        run(model, val_loader, templates, args.prob_thresh, args.nms_thresh,
            device, args.split,
            results_dir=args.results_dir, debug=args.debug)
if __name__ == "__main__":
    # Script entry point.
    main()
| 3,789 | 31.393162 | 83 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/trainer.py | from pathlib import Path
import numpy as np
import torch
from torch.nn import functional as nnfunc
from torchvision import transforms
from models.utils import get_bboxes
from utils.nms import nms
def print_state(idx, epoch, size, loss_cls, loss_reg):
    """Log the running classification/regression losses for one iteration.

    A non-negative `epoch` prints a training header ("Epoch: [e][i/n]"),
    otherwise a validation header ("Val: [i/n]").
    """
    if epoch >= 0:
        header = f"Epoch: [{epoch}][{idx}/{size}]\t"
    else:
        header = f"Val: [{idx}/{size}]\t"
    print(f"{header}\tloss_cls: {loss_cls:.6f}\tloss_reg: {loss_reg:.6f}")
def save_checkpoint(state, filename="checkpoint.pth", save_path="weights"):
    """Serialize a training-state dict to ``save_path/filename``.

    :param state: Picklable dict (model/optimizer state dicts, epoch, ...).
    :param filename: Name of the checkpoint file.
    :param save_path: Directory to write into; created (with parents) if missing.
    """
    # exist_ok avoids the race between an exists() check and mkdir(), and
    # parents=True also supports nested save paths.
    Path(save_path).mkdir(parents=True, exist_ok=True)
    torch.save(state, str(Path(save_path, filename)))
def visualize_output(img, output, templates, proc, prob_thresh=0.55, nms_thresh=0.1):
    """Interactively visualize the raw network output for one batch element.

    Undoes the input normalization on the first image of the batch, splits the
    output tensor into class/regression maps and hands them to
    `proc.visualize_heatmaps`. Blocks on `input()`; answering "n" exits.

    :param img: Normalized input batch tensor (B, C, H, W); modified in place.
    :param output: Raw model output (B, 5*T, H', W').
    :param templates: (T, >=4) canonical template boxes.
    :param proc: DataProcessor providing `visualize_heatmaps`.
    :param prob_thresh: Probability threshold forwarded to the visualizer.
    :param nms_thresh: NMS IoU threshold forwarded to the visualizer.
    """
    tensor_to_image = transforms.ToPILImage()
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    # invert the (x - mean) / std normalization in place, channel by channel
    for t, m, s in zip(img[0], mean, std):
        t.mul_(s).add_(m)
    image = tensor_to_image(img[0])  # Index into the batch
    num_templates = templates.shape[0]
    # torch.sigmoid replaces the deprecated torch.nn.functional.sigmoid
    cls_map = torch.sigmoid(output[:, 0:num_templates, :, :]).data.cpu(
        ).numpy().transpose((0, 2, 3, 1))[0, :, :, :]
    reg_map = output[:, num_templates:, :, :].data.cpu(
        ).numpy().transpose((0, 2, 3, 1))[0, :, :, :]
    # print the class probabilities in descending order for quick inspection
    print(np.sort(np.unique(cls_map))[::-1])
    proc.visualize_heatmaps(image, cls_map, reg_map, templates,
                            prob_thresh=prob_thresh, nms_thresh=nms_thresh)
    p = input("Continue? [Yn]")
    if p.lower().strip() == 'n':
        exit(0)
def draw_bboxes(image, img_id, bboxes, scores, scales, processor):
    """Delegate rendering and saving of bounding boxes to the dataset processor."""
    processor.render_and_save_bboxes(image, img_id, bboxes, scores, scales)
def train(model, loss_fn, optimizer, dataloader, epoch, device):
    """Train the detector for one epoch.

    :param model: DetectionModel to optimize (moved to `device` here).
    :param loss_fn: DetectionCriterion; also tracks the running loss averages
        that are printed after every batch.
    :param optimizer: Optimizer over `model`'s parameters.
    :param dataloader: Yields (image, class_map, regression_map) batches.
    :param epoch: Current epoch index (used only for logging).
    :param device: torch.device to train on.
    """
    model = model.to(device)
    model.train()
    for idx, (img, class_map, regression_map) in enumerate(dataloader):
        x = img.float().to(device)
        class_map_var = class_map.float().to(device)
        regression_map_var = regression_map.float().to(device)
        output = model(x)
        loss = loss_fn(output,
                       class_map_var, regression_map_var)
        # visualize_output(img, output, dataloader.dataset.templates)
        optimizer.zero_grad()
        # Get the gradients
        # torch will automatically mask the gradients to 0 where applicable!
        loss.backward()
        optimizer.step()
        print_state(idx, epoch, len(dataloader),
                    loss_fn.class_average.average,
                    loss_fn.reg_average.average)
def get_detections(model, img, templates, rf, img_transforms,
                   prob_thresh=0.65, nms_thresh=0.3, scales=(-2, -1, 0, 1), device=None):
    """Detect faces in one image by running the model over an image pyramid.

    :param model: DetectionModel (moved to `device` and put in eval mode here).
    :param img: Image tensor of shape (1, C, H, W) straight from the loader.
    :param templates: (T, >=4) array of canonical template boxes.
    :param rf: Receptive-field dict with "stride" and "offset" entries.
    :param img_transforms: Normalization transforms applied per rescaled image.
    :param prob_thresh: Minimum class probability for a candidate detection.
    :param nms_thresh: IoU threshold for non-maximum suppression.
    :param scales: Pyramid levels, expressed as powers of two.
    :param device: torch.device to run inference on.
    :return: (N, 5) array of detections [x1, y1, x2, y2, score] after NMS.
    """
    model = model.to(device)
    model.eval()
    dets = np.empty((0, 5))  # store bbox (x1, y1, x2, y2), score
    num_templates = templates.shape[0]
    # Evaluate over multiple scales
    scales_list = [2 ** x for x in scales]
    # convert tensor to PIL image so we can perform resizing
    image = transforms.functional.to_pil_image(img[0])
    min_side = np.min(image.size)
    for scale in scales_list:
        # scale the images. Plain int() replaces np.int, which was removed
        # from NumPy 1.24+ and made this loop crash on current installs.
        scaled_image = transforms.functional.resize(image,
                                                    int(min_side * scale))
        # normalize the images
        img = img_transforms(scaled_image)
        # add batch dimension
        img.unsqueeze_(0)
        # now run the model
        x = img.float().to(device)
        output = model(x)
        # first `num_templates` channels are class maps
        score_cls = output[:, :num_templates, :, :]
        prob_cls = torch.sigmoid(score_cls)
        score_cls = score_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
        prob_cls = prob_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
        score_reg = output[:, num_templates:, :, :]
        score_reg = score_reg.data.cpu().numpy().transpose((0, 2, 3, 1))
        t_bboxes, scores = get_bboxes(score_cls, score_reg, prob_cls,
                                      templates, prob_thresh, rf, scale)
        # append scores at the end for NMS
        # (a per-box inverse-scale array was computed here before, but it was
        # unused and shadowed the `scales` argument, so it has been removed)
        d = np.hstack((t_bboxes, scores))
        dets = np.vstack((dets, d))
    # Apply NMS over the detections pooled from all pyramid levels
    keep = nms(dets, nms_thresh)
    dets = dets[keep]
    return dets
| 4,459 | 29.972222 | 89 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/models/loss.py | import numpy as np
import torch
from torch import nn
from .utils import balance_sampling
class AvgMeter:
    """Running weighted average of a scalar (e.g. a loss) across batches."""

    def __init__(self):
        self.average = 0
        self.num_averaged = 0

    def update(self, loss, size):
        """Fold `size` new samples with summed contribution `loss` into the mean."""
        total = self.num_averaged + size
        self.average = (self.num_averaged * self.average + float(loss)) / total
        self.num_averaged = total

    def reset(self):
        """Forget all accumulated statistics."""
        self.average = 0
        self.num_averaged = 0
class DetectionCriterion(nn.Module):
    """
    The loss for the Tiny Faces detector: a masked SoftMargin classification
    loss plus a masked SmoothL1 box-regression loss, combined after online
    hard-negative mining and positive/negative balance sampling.
    """

    def __init__(self, n_templates=25, reg_weight=1, pos_fraction=0.5):
        super().__init__()
        # We don't want per element averaging.
        # We want to normalize over the batch or positive samples.
        self.regression_criterion = nn.SmoothL1Loss(reduction='none')
        self.classification_criterion = nn.SoftMarginLoss(reduction='none')
        self.n_templates = n_templates
        self.reg_weight = reg_weight
        self.pos_fraction = pos_fraction
        # running per-epoch averages, exposed for logging by the training loop
        self.class_average = AvgMeter()
        self.reg_average = AvgMeter()
        # last computed loss terms, kept for inspection/debugging
        self.masked_class_loss = None
        self.masked_reg_loss = None
        self.total_loss = None

    def balance_sample(self, class_map):
        """Subsample the +1/-1 labels of each batch element to the target
        positive fraction (surplus labels are zeroed, i.e. ignored)."""
        device = class_map.device
        # balance_sampling operates on numpy arrays, so round-trip via CPU
        label_class_np = class_map.cpu().numpy()
        # iterate through batch
        for idx in range(label_class_np.shape[0]):
            label_class_np[idx, ...] = balance_sampling(label_class_np[idx, ...],
                                                        pos_fraction=self.pos_fraction)
        class_map = torch.from_numpy(label_class_np).to(device)
        return class_map

    def hard_negative_mining(self, classification, class_map):
        """Zero out (ignore) labels whose current classification loss is
        already small, so training focuses on the hard examples."""
        loss_class_map = nn.functional.soft_margin_loss(classification.detach(), class_map,
                                                        reduction='none')
        # 0.03 is the "easy example" loss cutoff; modifies class_map in place
        class_map[loss_class_map < 0.03] = 0
        return class_map

    def forward(self, output, class_map, regression_map):
        """Compute the combined detection loss.

        :param output: Model output (B, 5*T, H, W): T class channels followed
            by 4*T regression channels.
        :param class_map: Ground-truth labels in {-1, 0, +1} per template.
        :param regression_map: Ground-truth (tx, ty, tw, th) regression targets.
        :return: Scalar total loss (classification + reg_weight * regression).
        """
        classification = output[:, 0:self.n_templates, :, :]
        regression = output[:, self.n_templates:, :, :]
        # online hard negative mining
        class_map = self.hard_negative_mining(classification, class_map)
        # balance sampling
        class_map = self.balance_sample(class_map)
        class_loss = self.classification_criterion(classification, class_map)
        # weights used to mask out invalid regions i.e. where the label is 0
        class_mask = (class_map != 0).type(output.dtype)
        # Mask the classification loss
        self.masked_class_loss = class_mask * class_loss
        reg_loss = self.regression_criterion(regression, regression_map)
        # make same size as reg_map: one copy of the positive mask per
        # regression component (tx, ty, tw, th)
        reg_mask = (class_map > 0).repeat(1, 4, 1, 1).type(output.dtype)
        self.masked_reg_loss = reg_mask * reg_loss  # / reg_loss.size(0)
        self.total_loss = self.masked_class_loss.sum() + \
            self.reg_weight * self.masked_reg_loss.sum()
        # track running averages, normalized by batch size
        self.class_average.update(self.masked_class_loss.sum(), output.size(0))
        self.reg_average.update(self.masked_reg_loss.sum(), output.size(0))
        return self.total_loss

    def reset(self):
        """Reset the running loss averages (typically at epoch boundaries)."""
        self.class_average.reset()
        self.reg_average.reset()
| 3,298 | 32.663265 | 91 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/models/utils.py | import numpy as np
def get_bboxes(score_cls, score_reg, prob_cls, templates, prob_thresh, rf, scale=1, refine=True):
    """
    Convert model output tensors to a set of bounding boxes and their scores.

    :param score_cls: (B, H, W, T) raw classification scores (channels-last).
    :param score_reg: (B, H, W, 4*T) regression offsets (tx, ty, tw, th blocks).
    :param prob_cls: (B, H, W, T) sigmoid probabilities; modified in place to
        suppress templates that are invalid at this scale.
    :param templates: (T, 5) template boxes [x1, y1, x2, y2, scale].
    :param prob_thresh: Probability threshold for keeping a candidate.
    :param rf: Receptive-field dict with "stride" and "offset" entries.
    :param scale: Pyramid scale the input image was resized by.
    :param refine: Whether to apply the regression offsets to the templates.
    :return: (bboxes, scores): bboxes (N, 4) in original-image coordinates and
        scores (N, 1) raw classification scores.
    """
    num_templates = templates.shape[0]
    # template to evaluate at every scale (Type A templates)
    all_scale_template_ids = np.arange(4, 12)
    # templates to evaluate at a single scale aka small scale (Type B templates)
    one_scale_template_ids = np.arange(18, 25)
    # everything else is never evaluated
    ignored_template_ids = np.setdiff1d(np.arange(25),
                                        np.concatenate((all_scale_template_ids,
                                                        one_scale_template_ids)))
    template_scales = templates[:, 4]
    # if we down-sample, then we only need large templates
    if scale < 1:
        invalid_one_scale_idx = np.where(
            template_scales[one_scale_template_ids] >= 1.0)
    else:
        # at native resolution or when up-sampling, keep only scale-1 templates
        # (the original separate == 1 and > 1 branches were identical)
        invalid_one_scale_idx = np.where(
            template_scales[one_scale_template_ids] != 1.0)
    invalid_template_id = np.concatenate((ignored_template_ids,
                                          one_scale_template_ids[invalid_one_scale_idx]))
    # zero out predictions from templates that are invalid at this scale.
    # Index the *template* (last) axis: the previous `[:, :, ids]` form hit
    # the width axis of the (B, H, W, T) array instead.
    prob_cls[..., invalid_template_id] = 0.0
    indices = np.where(prob_cls > prob_thresh)
    fb, fy, fx, fc = indices
    scores = score_cls[fb, fy, fx, fc]
    scores = scores.reshape((scores.shape[0], 1))
    stride, offset = rf['stride'], rf['offset']
    # map heatmap coordinates back to image-pixel centers
    cy, cx = fy * stride[0] + offset[0], fx * stride[1] + offset[1]
    cw = templates[fc, 2] - templates[fc, 0] + 1
    ch = templates[fc, 3] - templates[fc, 1] + 1
    # bounding box refinements: one block of num_templates channels per component
    tx = score_reg[:, :, :, 0:num_templates]
    ty = score_reg[:, :, :, 1 * num_templates:2 * num_templates]
    tw = score_reg[:, :, :, 2 * num_templates:3 * num_templates]
    th = score_reg[:, :, :, 3 * num_templates:4 * num_templates]
    if refine:
        bboxes = regression_refinement(tx, ty, tw, th,
                                       cx, cy, cw, ch,
                                       indices)
        # refinement returns a leading singleton channel dim; drop it -> (N, 4)
        bboxes = bboxes[0]
    else:
        # stack corners as (N, 4); the previous code built a (4, N) array and
        # then sliced off everything except the x1 row
        bboxes = np.array([cx - cw / 2, cy - ch / 2,
                           cx + cw / 2, cy + ch / 2]).T
    # undo the pyramid scaling so boxes are in original-image coordinates
    factor = 1 / scale
    bboxes = bboxes * factor
    return bboxes, scores
def regression_refinement(tx, ty, tw, th, cx, cy, cw, ch, indices):
    """Apply predicted offsets to template centers/sizes and return corner boxes.

    Centers are shifted by (tx, ty) in units of the template size, and the
    width/height are rescaled by exp(tw)/exp(th) - the standard bounding-box
    regression parameterization. Returns an array of shape (1, N, 4) holding
    [x1, y1, x2, y2] rows.
    """
    # refined centers and sizes, one column vector entry per selected location
    center_x = (cx + cw * tx[indices]).reshape(-1, 1)
    center_y = (cy + ch * ty[indices]).reshape(-1, 1)
    width = cw * np.exp(tw[indices])
    height = ch * np.exp(th[indices])
    half_w = width.reshape(-1, 1) / 2
    half_h = height.reshape(-1, 1) / 2
    # stack as [x1, y1, x2, y2]; transposing (4, N, 1) yields (1, N, 4)
    corners = np.array([center_x - half_w, center_y - half_h,
                        center_x + half_w, center_y + half_h]).T
    return corners
def balance_sampling(label_cls, pos_fraction, sample_size=256):
    """
    Perform balance sampling by always sampling `pos_fraction` positive samples and
    `(1-pos_fraction)` negative samples from the input. Labels are +1 (positive),
    -1 (negative) and 0 (ignored); surplus entries are zeroed out in place.

    :param label_cls: Class labels as numpy.array (modified in place).
    :param pos_fraction: The maximum fraction of positive samples to keep.
    :param sample_size: Total sampling budget (positives + negatives).
    :return: The label array with surplus positives/negatives set to 0.
    """
    pos_maxnum = sample_size * pos_fraction  # sample 128 positive points
    # Find all the points where we have objects and ravel the indices to get a 1D array.
    # This makes the subsequent operations easier to reason about
    pos_idx_unraveled = np.where(label_cls == 1)
    pos_idx = np.array(np.ravel_multi_index(
        pos_idx_unraveled, label_cls.shape))
    if pos_idx.size > pos_maxnum:
        # Get all the indices of the locations to be zeroed out
        didx = shuffle_index(pos_idx.size, pos_idx.size-pos_maxnum)
        # Get the locations and unravel it so we can index
        pos_idx_unraveled = np.unravel_index(pos_idx[didx], label_cls.shape)
        label_cls[pos_idx_unraveled] = 0
    # negative budget derived from the positive budget and the target ratio
    neg_maxnum = pos_maxnum * (1 - pos_fraction) / pos_fraction
    neg_idx_unraveled = np.where(label_cls == -1)
    neg_idx = np.array(np.ravel_multi_index(neg_idx_unraveled,
                                            label_cls.shape))
    if neg_idx.size > neg_maxnum:
        # ridx selects the negatives to *keep*; everything not in it is zeroed
        ridx = shuffle_index(neg_idx.size, neg_maxnum)
        didx = np.arange(0, neg_idx.size)
        didx = np.delete(didx, ridx)
        neg_idx = np.unravel_index(neg_idx[didx], label_cls.shape)
        label_cls[neg_idx] = 0
    return label_cls
def shuffle_index(n, n_out):
    """
    Return `n_out` distinct indices drawn from a random permutation of range(n).

    :param n: The number of indices to shuffle.
    :param n_out: The number of output indices (must not exceed `n`).
    :return: 1-D array of indices; empty when either argument is zero.
    """
    count, want = int(n), int(n_out)
    if count == 0 or want == 0:
        return np.empty(0)
    perm = np.random.permutation(count)
    # the output can be at most the size of the input
    assert want <= count
    return perm if want == count else perm[:want]
| 5,274 | 32.386076 | 97 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/models/model.py | import numpy as np
import torch
from torch import nn
from torchvision.models import resnet50, resnet101
class DetectionModel(nn.Module):
    """
    Hybrid Model from Tiny Faces paper: a ResNet backbone truncated after
    layer3, with 1x1 score heads on the res3 and res4 feature maps whose
    outputs are fused after bilinear up-sampling of the res4 scores.
    """
    def __init__(self, base_model=resnet101, num_templates=1, num_objects=1):
        super().__init__()
        # 4 is for the bounding box offsets
        output = (num_objects + 4)*num_templates
        self.model = base_model(pretrained=True)
        # delete unneeded layer
        del self.model.layer4
        # 1x1 conv score heads over the res3 (512-ch) and res4 (1024-ch) maps
        self.score_res3 = nn.Conv2d(in_channels=512, out_channels=output,
                                    kernel_size=1, padding=0)
        self.score_res4 = nn.Conv2d(in_channels=1024, out_channels=output,
                                    kernel_size=1, padding=0)
        # 2x up-sampling of the res4 scores to match the res3 resolution
        self.score4_upsample = nn.ConvTranspose2d(in_channels=output, out_channels=output,
                                                  kernel_size=4, stride=2, padding=1, bias=False)
        self._init_bilinear()

    def _init_weights(self):
        # placeholder - the conv heads currently keep their default init
        pass

    def _init_bilinear(self):
        """
        Initialize the ConvTranspose2d layer with a bilinear interpolation mapping
        :return:
        """
        k = self.score4_upsample.kernel_size[0]
        factor = np.floor((k+1)/2)
        if k % 2 == 1:
            center = factor
        else:
            center = factor + 0.5
        # NOTE(review): np.arange(1, 5) is hard-wired to kernel_size=4; this
        # presumably should be np.arange(1, k + 1) for other kernel sizes.
        C = np.arange(1, 5)
        f = np.zeros((self.score4_upsample.in_channels,
                      self.score4_upsample.out_channels, k, k))
        # separable bilinear kernel, placed on the diagonal (per-channel)
        for i in range(self.score4_upsample.out_channels):
            f[i, i, :, :] = (np.ones((1, k)) - (np.abs(C-center)/factor)).T @ \
                            (np.ones((1, k)) - (np.abs(C-center)/factor))
        self.score4_upsample.weight = torch.nn.Parameter(data=torch.Tensor(f))

    def learnable_parameters(self, lr):
        """Return per-module parameter groups with their learning rates."""
        parameters = [
            # Be T'Challa. Don't freeze.
            {'params': self.model.parameters(), 'lr': lr},
            {'params': self.score_res3.parameters(), 'lr': 0.1*lr},
            {'params': self.score_res4.parameters(), 'lr': 1*lr},
            {'params': self.score4_upsample.parameters(), 'lr': 0}  # freeze UpConv layer
        ]
        return parameters

    def forward(self, x):
        # backbone stem
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        # res2 = x
        x = self.model.layer2(x)
        res3 = x
        x = self.model.layer3(x)
        res4 = x
        score_res3 = self.score_res3(res3)
        score_res4 = self.score_res4(res4)
        score4 = self.score4_upsample(score_res4)
        # We need to do some fancy cropping to accomodate the difference in image sizes in eval
        if not self.training:
            # from vl_feats DagNN Crop
            cropv = score4.size(2) - score_res3.size(2)
            cropu = score4.size(3) - score_res3.size(3)
            # if the crop is 0 (both the input sizes are the same)
            # we do some arithmetic to allow python to index correctly
            if cropv == 0:
                cropv = -score4.size(2)
            if cropu == 0:
                cropu = -score4.size(3)
            score4 = score4[:, :, 0:-cropv, 0:-cropu]
        else:
            # match the dimensions arbitrarily
            score4 = score4[:, :, 0:score_res3.size(2), 0:score_res3.size(3)]
        # fuse the two score maps at res3 resolution
        score = score_res3 + score4
        return score
| 3,496 | 32.304762 | 97 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/models/__init__.py | 0 | 0 | 0 | py | |
tiny-faces-pytorch | tiny-faces-pytorch-master/datasets/processor.py | import numpy as np
from copy import deepcopy
from utils.visualize import draw_bounding_box, render_and_save_bboxes, visualize_bboxes
from utils.nms import nms
from utils.metrics import rect_dist
from utils.dense_overlap import compute_dense_overlap
import logging
logger = logging.getLogger("detector")
class DataProcessor:
    """
    This is a helper class to abstract out all the operation needed during the data-loading
    pipeline of the Tiny Faces object detector.
    The idea is that this can act as a mixin that enables torch dataloaders with the heatmap
    generation semantics.
    """
    def __init__(self, input_size, heatmap_size, pos_thresh, neg_thresh, templates,
                 img_means=None, rf=None):
        # `input_size`/`heatmap_size` are (H, W) tuples; `rf` carries the
        # receptive-field "offset" and "stride" as (y, x) pairs.
        self.input_size = input_size
        self.heatmap_size = heatmap_size
        self.pos_thresh = pos_thresh
        self.neg_thresh = neg_thresh
        self.templates = templates
        self.rf = rf
        self.ofy, self.ofx = rf['offset']
        self.sty, self.stx = rf['stride']
        # ImageNet channel means by default
        self.img_means = img_means or [0.485, 0.456, 0.406]

    def crop_image(self, img, bboxes):
        """
        Crop a 500x500 patch from the image, taking care for smaller images.
        bboxes is the np.array of all bounding boxes [x1, y1, x2, y2]
        """
        # randomly pick a cropping window for the image
        # We keep the second arg to randint at least 1 since randint is [low, high)
        crop_x1 = np.random.randint(0, np.max([1, (img.shape[1] - self.input_size[1] + 1)]))
        crop_y1 = np.random.randint(0, np.max([1, (img.shape[0] - self.input_size[0] + 1)]))
        crop_x2 = min(img.shape[1], crop_x1 + self.input_size[1])
        crop_y2 = min(img.shape[0], crop_y1 + self.input_size[0])
        crop_h = crop_y2 - crop_y1
        crop_w = crop_x2 - crop_x1
        # place the cropped image in a random location in a `input_size` image
        paste_box = [0, 0, 0, 0]  # x1, y1, x2, y2
        paste_box[0] = np.random.randint(0, self.input_size[1] - crop_w + 1)
        paste_box[1] = np.random.randint(0, self.input_size[0] - crop_h + 1)
        paste_box[2] = paste_box[0] + crop_w
        paste_box[3] = paste_box[1] + crop_h
        # set this to average image colors
        # this will later be subtracted in mean image subtraction
        img_buf = np.zeros((self.input_size + (3,)))
        # add the average image so it gets subtracted later.
        for i, c in enumerate(self.img_means):
            img_buf[:, :, i] += c
        # img is a int8 array, so we need to scale the values accordingly
        # NOTE(review): values > 127 wrap in int8; the later .astype(np.uint8)
        # in the dataset appears to rely on mod-256 round-tripping - confirm.
        img_buf = (img_buf * 255).astype(np.int8)
        img_buf[paste_box[1]:paste_box[3], paste_box[0]:paste_box[2], :] = img[crop_y1:crop_y2, crop_x1:crop_x2, :]
        if bboxes.shape[0] > 0:
            # check if overlap is above negative threshold
            tbox = deepcopy(bboxes)
            tbox[:, 0] = np.maximum(tbox[:, 0], crop_x1)
            tbox[:, 1] = np.maximum(tbox[:, 1], crop_y1)
            tbox[:, 2] = np.minimum(tbox[:, 2], crop_x2)
            tbox[:, 3] = np.minimum(tbox[:, 3], crop_y2)
            overlap = 1 - rect_dist(tbox, bboxes)
            # adjust the bounding boxes - first for crop and then for random placement
            bboxes[:, 0] = bboxes[:, 0] - crop_x1 + paste_box[0]
            bboxes[:, 1] = bboxes[:, 1] - crop_y1 + paste_box[1]
            bboxes[:, 2] = bboxes[:, 2] - crop_x1 + paste_box[0]
            bboxes[:, 3] = bboxes[:, 3] - crop_y1 + paste_box[1]
            # correct for bbox to be within image border
            bboxes[:, 0] = np.minimum(self.input_size[1], np.maximum(0, bboxes[:, 0]))
            bboxes[:, 1] = np.minimum(self.input_size[0], np.maximum(0, bboxes[:, 1]))
            bboxes[:, 2] = np.minimum(self.input_size[1], np.maximum(1, bboxes[:, 2]))
            bboxes[:, 3] = np.minimum(self.input_size[0], np.maximum(1, bboxes[:, 3]))
            # check to see if the adjusted bounding box is invalid
            invalid = np.logical_or(np.logical_or(bboxes[:, 2] <= bboxes[:, 0], bboxes[:, 3] <= bboxes[:, 1]),
                                    overlap < self.neg_thresh)
            # remove invalid bounding boxes
            ind = np.where(invalid)
            bboxes = np.delete(bboxes, ind, 0)
        return img_buf, bboxes, paste_box

    def get_padding(self, paste_box):
        """
        Get the padding of the image based on where the sampled image patch was placed.
        Returns a boolean (vsy, vsx, num_templates) mask that is True wherever a
        template anchored at that heatmap cell sticks out of the pasted patch.
        :param paste_box: [x1, y1, x2, y2]
        :return:
        """
        ofy, ofx = self.rf['offset']
        sty, stx = self.rf['stride']
        vsy, vsx = self.heatmap_size
        # image-space center of each heatmap cell
        coarse_x, coarse_y = np.meshgrid(ofx + np.array(range(vsx)) * stx,
                                         ofy + np.array(range(vsy)) * sty)
        # each cluster is [x1, y1, x2, y2]
        dx1 = self.templates[:, 0]
        dy1 = self.templates[:, 1]
        dx2 = self.templates[:, 2]
        dy2 = self.templates[:, 3]
        # compute the bounds
        # We add new axes so that the arrays are numpy broadcasting compatible
        coarse_xx1 = coarse_x[:, :, np.newaxis] + dx1[np.newaxis, np.newaxis, :]  # (vsy, vsx, nt)
        coarse_yy1 = coarse_y[:, :, np.newaxis] + dy1[np.newaxis, np.newaxis, :]  # (vsy, vsx, nt)
        coarse_xx2 = coarse_x[:, :, np.newaxis] + dx2[np.newaxis, np.newaxis, :]  # (vsy, vsx, nt)
        coarse_yy2 = coarse_y[:, :, np.newaxis] + dy2[np.newaxis, np.newaxis, :]  # (vsy, vsx, nt)
        # Matlab code indexes from 1 hence to check against it, we need to add +1
        # However, in python we don't need the +1 during actual training
        padx1 = coarse_xx1 < paste_box[0] + 1
        pady1 = coarse_yy1 < paste_box[1] + 1
        padx2 = coarse_xx2 > paste_box[2]
        pady2 = coarse_yy2 > paste_box[3]
        pad_mask = padx1 | pady1 | padx2 | pady2
        return pad_mask

    def get_regression(self, bboxes, cluster_boxes, iou):
        """
        Compute the target bounding box regression values
        :param bboxes: (N, 4) ground-truth boxes [x1, y1, x2, y2].
        :param cluster_boxes: [dx1, dy1, dx2, dy2] template corner arrays.
        :param iou: (vsy, vsx, nt, N) dense overlap tensor; perturbed and
            returned alongside the regression maps.
        :return: ((vsy, vsx, 4*nt) regression targets, perturbed iou)
        """
        ofy, ofx = self.rf['offset']
        sty, stx = self.rf['stride']
        vsy, vsx = self.heatmap_size
        coarse_xx, coarse_yy = np.meshgrid(ofx + np.array(range(vsx)) * stx,
                                           ofy + np.array(range(vsy)) * sty)
        dx1, dy1, dx2, dy2 = cluster_boxes
        # We reshape to take advantage of numpy broadcasting
        fxx1 = bboxes[:, 0].reshape(1, 1, 1, bboxes.shape[0])  # (1, 1, 1, bboxes)
        fyy1 = bboxes[:, 1].reshape(1, 1, 1, bboxes.shape[0])
        fxx2 = bboxes[:, 2].reshape(1, 1, 1, bboxes.shape[0])
        fyy2 = bboxes[:, 3].reshape(1, 1, 1, bboxes.shape[0])
        h = dy2 - dy1 + 1
        w = dx2 - dx1 + 1
        dhh = h.reshape(1, 1, h.shape[0], 1)  # (1, 1, N, 1)
        dww = w.reshape(1, 1, w.shape[0], 1)  # (1, 1, N, 1)
        # ground-truth centers
        fcx = (fxx1 + fxx2) / 2
        fcy = (fyy1 + fyy2) / 2
        # center offsets normalized by the template size
        tx = np.divide((fcx - coarse_xx.reshape(vsy, vsx, 1, 1)), dww)
        ty = np.divide((fcy - coarse_yy.reshape(vsy, vsx, 1, 1)), dhh)
        fhh = fyy2 - fyy1 + 1
        fww = fxx2 - fxx1 + 1
        # log-space size ratios
        tw = np.log(np.divide(fww, dww))  # (1, 1, N, bboxes)
        th = np.log(np.divide(fhh, dhh))
        # Randomly perturb the IOU so that if multiple candidates have the same IOU,
        # we don't pick the same one every time. This is useful when the template is smaller than the GT bbox
        iou = iou + (1e-6 * np.random.rand(*iou.shape))
        # for each (cell, template), regress toward the best-overlapping object
        best_obj_per_loc = iou.argmax(axis=3)
        idx0, idx1, idx2 = np.indices(iou.shape[:-1])
        tx = tx[idx0, idx1, idx2, best_obj_per_loc]
        ty = ty[idx0, idx1, idx2, best_obj_per_loc]
        tw = np.repeat(tw, vsy, axis=0)  # (vsy, 1, N, bboxes)
        tw = np.repeat(tw, vsx, axis=1)  # (vsy, vsx, N, bboxes)
        tw = tw[idx0, idx1, idx2, best_obj_per_loc]
        th = np.repeat(th, vsy, axis=0)
        th = np.repeat(th, vsx, axis=1)
        th = th[idx0, idx1, idx2, best_obj_per_loc]
        return np.concatenate((tx, ty, tw, th), axis=2), iou

    def get_heatmaps(self, bboxes, pad_mask):
        """Build the ground-truth class (+1/-1/0) and regression heatmaps for
        one image from its (possibly empty) set of ground-truth boxes."""
        ofy, ofx = self.rf['offset']
        sty, stx = self.rf['stride']
        vsy, vsx = self.heatmap_size
        nt = self.templates.shape[0]
        # Initiate heatmaps: everything starts as a negative (-1)
        class_maps = -np.ones((vsy, vsx, nt))
        regress_maps = np.zeros((vsy, vsx, nt * 4))
        # each cluster is [-w/2, -h/2, w/2, h/2]
        dx1, dx2 = self.templates[:, 0], self.templates[:, 2]
        dy1, dy2 = self.templates[:, 1], self.templates[:, 3]
        # Filter out invalid bbox
        invalid = np.logical_or(bboxes[:, 2] <= bboxes[:, 0], bboxes[:, 3] <= bboxes[:, 1])
        ind = np.where(invalid)
        bboxes = np.delete(bboxes, ind, axis=0)
        ng = bboxes.shape[0]
        iou = np.zeros((vsy, vsx, self.templates.shape[0], bboxes.shape[0]))
        if ng > 0:
            gx1, gy1, gx2, gy2 = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
            iou = compute_dense_overlap(ofx, ofy, stx, sty, vsx, vsy,
                                        dx1, dy1, dx2, dy2,
                                        gx1, gy1, gx2, gy2,
                                        1, 1)
            regress_maps, iou = self.get_regression(bboxes, [dx1, dy1, dx2, dy2], iou)
            best_iou = iou.max(axis=3)
            # Set max IoU values to 1 (even if they are < pos_thresh, as long as they are above neg_thresh)
            per_object_iou = np.reshape(iou, (-1, ng))
            fbest_idx = np.argmax(per_object_iou, axis=0)
            iou_ = np.amax(per_object_iou, axis=0)
            fbest_idx = np.unravel_index(fbest_idx[iou_ > self.neg_thresh], iou.shape[:-1])
            class_maps[fbest_idx] = 1
            # Assign positive labels
            class_maps = np.maximum(class_maps, (best_iou >= self.pos_thresh)*2-1)
            # If between positive and negative, assign as gray area
            gray = -np.ones(class_maps.shape)
            gray[np.bitwise_and(self.neg_thresh <= best_iou, best_iou < self.pos_thresh)] = 0
            class_maps = np.maximum(class_maps, gray)  # since we set the max IoU values to 1
        # handle the boundary: anything outside the pasted patch that is not
        # already negative becomes "ignore"
        non_neg_border = np.bitwise_and(pad_mask, class_maps != -1)
        class_maps[non_neg_border] = 0
        regress_maps[:, :, :nt][non_neg_border] = 0
        # Return heatmaps
        return class_maps, regress_maps, iou

    def visualize_heatmaps(self, img, cls_map, reg_map, templates, prob_thresh=1, nms_thresh=1, iou=None):
        """
        Expect cls_map and reg_map to be of the form HxWxC
        """
        fy, fx, fc = np.where(cls_map >= prob_thresh)
        # print(iou.shape)
        # best_iou = iou.max(axis=3)
        # print(best_iou.shape)
        # fy, fx, fc = np.where(best_iou >= 0.5)  # neg thresh
        # map heatmap coordinates back to image-space centers
        cy, cx = fy*self.sty + self.ofy, fx*self.stx + self.ofx
        cw = templates[fc, 2] - templates[fc, 0]
        ch = templates[fc, 3] - templates[fc, 1]
        # box_ovlp = best_iou[fc, fy, fx]
        num_templates = templates.shape[0]
        # refine bounding box
        tx = reg_map[:, :, 0*num_templates:1*num_templates]
        ty = reg_map[:, :, 1*num_templates:2*num_templates]
        tw = reg_map[:, :, 2*num_templates:3*num_templates]
        th = reg_map[:, :, 3*num_templates:4*num_templates]
        dcx = cw * tx[fy, fx, fc]
        dcy = ch * ty[fy, fx, fc]
        rx = cx + dcx
        ry = cy + dcy
        rw = cw * np.exp(tw[fy, fx, fc])
        rh = ch * np.exp(th[fy, fx, fc])
        bboxes = np.array([np.abs(rx-rw/2), np.abs(ry-rh/2), rx+rw/2, ry+rh/2]).T
        scores = cls_map[fy, fx, fc]
        dets = np.hstack((bboxes, scores[:, np.newaxis]))
        keep = nms(dets, nms_thresh)
        bboxes = dets[keep][:, 0:4]
        # bbox_iou = best_iou[fy, fx, fc]
        # print("Best bounding box", bboxes)
        # print(bboxes.shape)
        print("Number of bboxes ", bboxes.shape[0])
        for idx, bbox in enumerate(bboxes):
            bbox = np.round(np.array(bbox))
            print(bbox)
            # img = draw_bounding_box(img, bbox, {"name": "{0}".format(np.around(bbox_iou[idx], decimals=2))})
            img = draw_bounding_box(img, bbox, {"name": "{0}".format(idx)})
            # if idx == 20:
            #     break
        img.show(title="Heatmap visualized")
| 12,457 | 39.448052 | 115 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/datasets/__init__.py | import numpy as np
import os
import os.path as osp
import json
from utils.cluster import compute_kmedoids
from .wider_face import WIDERFace
from torch.utils import data
def get_dataloader(datapath, args, num_templates=25,
                   template_file="templates.json", img_transforms=None,
                   train=True, split="train"):
    """Build a WIDERFace DataLoader and the canonical template boxes.

    Loads templates from `datasets/templates.json` when present; otherwise
    clusters the training bounding boxes into `num_templates` medoids and
    caches the result to that file.

    :param datapath: Annotation file for the requested split.
    :param args: Parsed CLI args (traindata, dataset_root, batch_size, ...).
    :param num_templates: Number of canonical template boxes.
    :param template_file: Filename (under datasets/) of the template cache.
    :param img_transforms: Optional torchvision transforms for the images.
    :param train: Whether this loader is used for training (enables shuffle).
    :param split: Dataset split name ("train", "val" or "test").
    :return: (DataLoader, (num_templates, 5) template array)
    """
    template_file = osp.join("datasets", template_file)
    if osp.exists(template_file):
        # context manager closes the handle (the bare open() leaked it)
        with open(template_file) as fp:
            templates = json.load(fp)
    else:
        # Cluster the bounding boxes to get the templates
        dataset = WIDERFace(osp.expanduser(args.traindata), [])
        clustering = compute_kmedoids(dataset.get_all_bboxes(), 1, indices=num_templates,
                                      option='pyclustering', max_clusters=num_templates)
        print("Canonical bounding boxes computed")
        templates = clustering[num_templates]['medoids'].tolist()
        # record templates
        with open(template_file, "w") as fp:
            json.dump(templates, fp)
    # np.round replaces np.round_, which was removed in NumPy 2.0
    templates = np.round(np.array(templates), decimals=8)
    data_loader = data.DataLoader(WIDERFace(osp.expanduser(datapath), templates,
                                            train=train, split=split, img_transforms=img_transforms,
                                            dataset_root=osp.expanduser(args.dataset_root),
                                            debug=args.debug),
                                  batch_size=args.batch_size, shuffle=train,
                                  num_workers=args.workers, pin_memory=True)
    return data_loader, templates
| 1,585 | 38.65 | 100 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/datasets/wider_face.py | from pathlib import Path
import numpy as np
import torch
from PIL import Image
from torch.utils.data import dataset
from torchvision import transforms
from utils import visualize
from .processor import DataProcessor
class WIDERFace(dataset.Dataset):
    """The WIDERFace dataset is generated using MATLAB,
    so a lot of small housekeeping elements have been added
    to take care of the indexing discrepancies."""
    def __init__(self, path, templates, img_transforms=None, dataset_root="", split="train",
                 train=True, input_size=(500, 500), heatmap_size=(63, 63),
                 pos_thresh=0.7, neg_thresh=0.3, pos_fraction=0.5, debug=False):
        # NOTE(review): `train` is accepted but not stored/used; the behavior
        # is driven by `split` instead.
        super().__init__()
        self.data = []
        self.split = split
        self.load(path)
        print("Dataset loaded")
        print("{0} samples in the {1} dataset".format(len(self.data),
                                                      self.split))
        # self.data = data
        # canonical object templates obtained via clustering
        # NOTE we directly use the values from Peiyun's repository stored in "templates.json"
        self.templates = templates
        self.transforms = img_transforms
        self.dataset_root = Path(dataset_root)
        self.input_size = input_size
        self.heatmap_size = heatmap_size
        self.pos_thresh = pos_thresh
        self.neg_thresh = neg_thresh
        self.pos_fraction = pos_fraction
        # receptive field computed using a combination of values from Matconvnet
        # plus derived equations.
        self.rf = {
            'size': [859, 859],
            'stride': [8, 8],
            'offset': [-1, -1]
        }
        self.processor = DataProcessor(input_size, heatmap_size,
                                       pos_thresh, neg_thresh,
                                       templates, rf=self.rf)
        self.debug = debug

    def load(self, path):
        """Load the dataset from the text file."""
        if self.split in ("train", "val"):
            # annotation format: image path, box count, then one
            # "x y w h blur expression illumination invalid occlusion pose"
            # line per box
            lines = open(path).readlines()
            self.data = []
            idx = 0
            while idx < len(lines):
                img = lines[idx].strip()
                idx += 1
                n = int(lines[idx].strip())
                idx += 1
                bboxes = np.empty((n, 10))
                if n == 0:
                    # images with zero faces still carry one placeholder line
                    idx += 1
                else:
                    for b in range(n):
                        bboxes[b, :] = [abs(float(x))
                                        for x in lines[idx].strip().split()]
                        idx += 1
                # remove invalid bboxes where w or h are 0
                invalid = np.where(np.logical_or(bboxes[:, 2] == 0,
                                                 bboxes[:, 3] == 0))
                bboxes = np.delete(bboxes, invalid, 0)
                # bounding boxes are 1 indexed so we keep them like that
                # and treat them as abstract geometrical objects
                # We only need to worry about the box indexing when actually rendering them
                # convert from (x, y, w, h) to (x1, y1, x2, y2)
                # We work with the two point representation
                # since cropping becomes easier to deal with
                # -1 to ensure the same representation as in Matlab.
                bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2] - 1
                bboxes[:, 3] = bboxes[:, 1] + bboxes[:, 3] - 1
                datum = {
                    "img_path": img,
                    "bboxes": bboxes[:, 0:4],
                    "blur": bboxes[:, 4],
                    "expression": bboxes[:, 5],
                    "illumination": bboxes[:, 6],
                    "invalid": bboxes[:, 7],
                    "occlusion": bboxes[:, 8],
                    "pose": bboxes[:, 9]
                }
                self.data.append(datum)
        elif self.split == "test":
            # the test list contains only image paths, no annotations
            data = open(path).readlines()
            self.data = [{'img_path': x.strip()} for x in data]

    def get_all_bboxes(self):
        """Return all ground-truth boxes in the dataset as one (N, 4) array."""
        bboxes = np.empty((0, 4))
        for datum in self.data:
            bboxes = np.vstack((bboxes, datum['bboxes']))
        return bboxes

    def __len__(self):
        return len(self.data)

    def process_inputs(self, image, bboxes):
        """Apply the training-time augmentations (random rescale, crop, flip)
        and build the ground-truth class/regression heatmaps."""
        # Randomly resize the image: 1/3 chance each of half, same, double size
        rnd = np.random.rand()
        if rnd < 1 / 3:
            # resize by half
            scaled_shape = (int(0.5 * image.height), int(0.5 * image.width))
            image = transforms.functional.resize(image, scaled_shape)
            bboxes = bboxes / 2
        elif rnd > 2 / 3:
            # double size
            scaled_shape = (int(2 * image.height), int(2 * image.width))
            image = transforms.functional.resize(image, scaled_shape)
            bboxes = bboxes * 2
        # convert from PIL Image to ndarray
        img = np.array(image)
        # Get a random crop of the image and keep only relevant bboxes
        img, bboxes, paste_box = self.processor.crop_image(img, bboxes)
        pad_mask = self.processor.get_padding(paste_box)
        # Random Flip
        flip = np.random.rand() > 0.5
        if flip:
            img = np.fliplr(img).copy()  # flip the image
            lx1, lx2 = np.array(bboxes[:, 0]), np.array(bboxes[:, 2])
            # Flip the bounding box. +1 for correct indexing
            bboxes[:, 0] = self.input_size[1] - lx2 + 1
            bboxes[:, 2] = self.input_size[1] - lx1 + 1
            pad_mask = np.fliplr(pad_mask)
        # Get the ground truth class and regression maps
        class_maps, regress_maps, iou = self.processor.get_heatmaps(bboxes,
                                                                    pad_mask)
        if self.debug:
            # Visualize stuff
            visualize.visualize_bboxes(Image.fromarray(img.astype('uint8'), 'RGB'),
                                       bboxes)
            self.processor.visualize_heatmaps(Image.fromarray(img.astype('uint8'), 'RGB'),
                                              class_maps, regress_maps, self.templates, iou=iou)
            # and now we exit
            exit(0)
        # transpose so we get CxHxW
        class_maps = class_maps.transpose((2, 0, 1))
        regress_maps = regress_maps.transpose((2, 0, 1))
        # img is type float64. Convert it to uint8 so torch knows to treat it like an image
        img = img.astype(np.uint8)
        return img, class_maps, regress_maps, bboxes

    def __getitem__(self, index):
        # returns (img, class_map, reg_map) for "train",
        # (img_tensor, img_path) for "val" and (img, filename) for "test"
        datum = self.data[index]
        image_root = self.dataset_root / "WIDER_{0}".format(self.split)
        image_path = image_root / "images" / datum['img_path']
        image = Image.open(image_path).convert('RGB')
        if self.split == 'train':
            bboxes = datum['bboxes']
            if self.debug:
                if bboxes.shape[0] == 0:
                    print(image_path)
                print("Dataset index: \t", index)
                print("image path:\t", image_path)
            img, class_map, reg_map, bboxes = self.process_inputs(image,
                                                                  bboxes)
            # convert everything to tensors
            if self.transforms is not None:
                # if img is a byte or uint8 array, it will convert from 0-255 to 0-1
                # this converts from (HxWxC) to (CxHxW) as well
                img = self.transforms(img)
            class_map = torch.from_numpy(class_map)
            reg_map = torch.from_numpy(reg_map)
            return img, class_map, reg_map
        elif self.split == 'val':
            # NOTE Return only the image and the image path.
            # Use the eval_tools to get the final results.
            if self.transforms is not None:
                # Only convert to tensor since we do normalization after rescaling
                img = transforms.functional.to_tensor(image)
            return img, datum['img_path']
        elif self.split == 'test':
            filename = datum['img_path']
            if self.transforms is not None:
                img = self.transforms(image)
            return img, filename
| 8,229 | 34.938865 | 96 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/nms.py | import numpy as np
def nms(dets, thresh):
    """Greedy non-maximum suppression over an array of detections.

    Courtesy of Ross Girshick
    [https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py]

    :param dets: (N, 5) array of [x1, y1, x2, y2, score] rows.
    :param thresh: IoU above which a lower-scored box is suppressed.
    :return: int array of kept row indices, ordered by descending score.
    """
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]
    # +1 because boxes are treated as inclusive pixel coordinates
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # intersection of the current best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only boxes whose overlap with box `i` is below the threshold
        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]
    # plain int replaces np.int, which was removed in NumPy 1.24
    return np.array(keep).astype(int)
| 921 | 24.611111 | 84 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/test_dense_overlap.py | from .dense_overlap import compute_dense_overlap
from scipy.io import loadmat
import numpy as np
# Regression check: compare the Python dense-overlap implementation against a
# reference result dumped from the original MATLAB code ("dense_overlap.mat").
d = loadmat("dense_overlap.mat")
# scalar offsets / strides / output-grid sizes stored as 1x1 MATLAB arrays
ofx, ofy = d['ofx'][0, 0], d['ofy'][0, 0]
stx, sty = d['stx'][0, 0], d['sty'][0, 0]
vsx, vsy = d['vsx'][0, 0], d['vsy'][0, 0]
# template deltas come in as 1x1xK arrays; flatten to 1-D vectors of length K
dx1, dy1, dx2, dy2 = d['dx1'], d['dy1'], d['dx2'], d['dy2']
dx1 = dx1.reshape(dx1.shape[2])
dy1 = dy1.reshape(dy1.shape[2])
dx2 = dx2.reshape(dx2.shape[2])
dy2 = dy2.reshape(dy2.shape[2])
# ground-truth box coordinates: flatten Mx1 column vectors to length-M vectors
gx1, gy1, gx2, gy2 = d['gx1'], d['gy1'], d['gx2'], d['gy2']
gx1 = gx1.reshape(gx1.shape[0])
gy1 = gy1.reshape(gy1.shape[0])
gx2 = gx2.reshape(gx2.shape[0])
gy2 = gy2.reshape(gy2.shape[0])
correct_iou = d['iou']
iou = compute_dense_overlap(ofx, ofy, stx, sty, vsx, vsy,
                            dx1, dy1, dx2, dy2,
                            gx1, gy1, gx2, gy2,
                            1, 1)
print("Computed IOU")
print("iou shape", iou.shape)
print("correct iou shape", correct_iou.shape)
# allclose tolerates the decimal truncation; array_equal demands exact parity
print("Tensors are close enough?", np.allclose(iou, correct_iou))
print("Tensors are equal?", np.array_equal(iou, correct_iou))
| 1,076 | 29.771429 | 65 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/dense_overlap.py | import numpy as np
def compute_dense_overlap(ofx, ofy, stx, sty, vsx, vsy,
                          dx1, dy1, dx2, dy2,
                          gx1, gy1, gx2, gy2, zmx=1, zmy=1):
    """
    Densely evaluate IoU between every template placed at every spatial cell
    of the output grid and every ground-truth box.

    Returns an array of shape (grid_h, grid_w, num_templates, num_gt),
    rounded to 14 decimals to match the MATLAB reference implementation.
    """
    n_templates = dx1.shape[0]
    n_gt = gx1.shape[0]

    # + 1 is by definition of receptive field
    grid_h, grid_w = (vsy - 1) * zmy + 1, (vsx - 1) * zmx + 1
    iou = np.zeros((grid_h, grid_w, n_templates, n_gt))

    for gt_idx in range(n_gt):
        bx1, by1, bx2, by2 = gx1[gt_idx], gy1[gt_idx], gx2[gt_idx], gy2[gt_idx]
        b_area = (bx2 - bx1 + 1) * (by2 - by1 + 1)

        for t_idx in range(n_templates):
            tx1, ty1, tx2, ty2 = dx1[t_idx], dy1[t_idx], dx2[t_idx], dy2[t_idx]
            t_area = (tx2 - tx1 + 1) * (ty2 - ty1 + 1)

            # walk every spatial cell of the output grid
            for col in range(grid_w):
                for row in range(grid_h):
                    # template centre for this cell
                    cx = ofx + col * (stx / zmx)
                    cy = ofy + row * (sty / zmy)

                    # intersection of the shifted template with the GT box
                    ix1 = max(tx1 + cx, bx1)
                    iy1 = max(ty1 + cy, by1)
                    ix2 = min(tx2 + cx, bx2)
                    iy2 = min(ty2 + cy, by2)

                    iw = ix2 - ix1 + 1
                    ih = iy2 - iy1 + 1
                    if ih > 0 and iw > 0:
                        i_area = iw * ih
                        iou[row, col, t_idx, gt_idx] = i_area / (t_area + b_area - i_area)
                    else:
                        iou[row, col, t_idx, gt_idx] = 0

    # truncate the number of decimals to match MATLAB behavior
    return np.around(iou, decimals=14)
| 1,997 | 31.754098 | 96 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/cluster.py |
import argparse
from datetime import datetime
from pathlib import Path
import numpy as np
from PIL import Image, ImageDraw
from pyclust import KMedoids
from pyclustering.cluster.kmedoids import kmedoids
import joblib
from tqdm import tqdm
from .k_medoids import kMedoids
from .metrics import jaccard_index, rect_dist
def centralize_bbox(bboxes):
    """
    Convert the bounding boxes from (x, y, w, h) to (-w/2, -h/2, w/2, h/2).
    We perform clustering based on aspect ratio only.
    """
    print("Centralize and vectorize")
    heights = bboxes[:, 3] - bboxes[:, 1] + 1
    widths = bboxes[:, 2] - bboxes[:, 0] + 1
    half_w = (widths - 1) / 2
    half_h = (heights - 1) / 2
    return np.vstack([-half_w, -half_h, half_w, half_h]).T
def compute_distances(bboxes):
    """Build the full pairwise (1 - IoU) distance matrix for a set of boxes."""
    print("Computing distances")
    n = len(bboxes)
    distances = np.zeros((n, n))
    for row in tqdm(range(n), total=n):
        for col in range(n):
            distances[row, col] = 1 - jaccard_index(bboxes[row, :], bboxes[col, :], (row, col))
    return distances
def draw_bboxes(clusters):
    """
    Draw and save the clustered bounding boxes for inspection
    :param clusters:
    :return:
    """
    canvas = Image.new('RGB', [512, 512])
    painter = ImageDraw.Draw(canvas)
    for bbox in clusters['medoids']:
        # medoid boxes are centred; shift so the top-left corner sits at (0, 0)
        shape = [(0, 0), (-bbox[0] + bbox[2], -bbox[1] + bbox[3])]
        outline_color = tuple(np.random.choice(range(256), size=3))
        painter.rectangle(shape, outline=outline_color)
    canvas.save("canonical_bbox_clusters_{0}.jpg".format(len(clusters['medoids'])))
def compute_kmedoids(bboxes, cls, option='pyclustering', indices=15, max_clusters=35, max_limit=5000):
    """
    Cluster (centralized) bounding boxes with k-medoids for k in
    [indices, max_clusters], using one of three backends.

    :param bboxes: (N, 4) boxes as (x, y, w, h); centralized internally.
    :param cls: category id, used for the distance-cache filename and stored per result.
    :param option: clustering backend: 'pyclustering', 'pyclust' or 'local'.
    :param indices: smallest number of clusters; also the number of empty
        placeholder dicts prepended so that clustering[k] corresponds to k clusters.
    :param max_clusters: largest number of clusters tried.
    :param max_limit: subsample cap on the number of boxes (memory/speed).
    :return: list of dicts {'n_clusters', 'medoids', 'class'} (first `indices`
        entries are empty placeholders).
    """
    print("Performing clustering using", option)
    # placeholders so the result list can be indexed directly by cluster count
    clustering = [{} for _ in range(indices)]
    bboxes = centralize_bbox(bboxes)
    # subsample the number of bounding boxes so that it can fit in memory and is faster
    if bboxes.shape[0] > max_limit:
        sub_ind = np.random.choice(np.arange(bboxes.shape[0]), size=max_limit, replace=False)
        bboxes = bboxes[sub_ind]
    # pairwise distances are expensive; cache them on disk per class
    distances_cache = Path('distances_{0}.jbl'.format(cls))
    if distances_cache.exists():
        print("Loading distances")
        dist = joblib.load(distances_cache)
    else:
        dist = compute_distances(bboxes)
        joblib.dump(dist, distances_cache, compress=5)
    if option == 'pyclustering':
        for k in range(indices, max_clusters+1):
            print(k, "clusters")
            initial_medoids = np.random.choice(bboxes.shape[0], size=k, replace=False)
            kmedoids_instance = kmedoids(dist, initial_medoids, ccore=True, data_type='distance_matrix')
            print("Running KMedoids")
            t1 = datetime.now()
            kmedoids_instance.process()
            dt = datetime.now() - t1
            print("Total time taken for clustering {k} medoids: {0}min:{1}s"
                  .format(dt.seconds // 60, dt.seconds % 60, k=k))
            medoids_idx = kmedoids_instance.get_medoids()
            medoids = bboxes[medoids_idx]
            clustering.append({'n_clusters': k, 'medoids': medoids, 'class': cls})
    elif option == 'pyclust':
        for k in range(indices, max_clusters+1):
            print(k, "clusters")
            # pyclust works on the raw boxes with a custom distance, not on `dist`
            kmd = KMedoids(n_clusters=k, distance=rect_dist, n_trials=1, max_iter=2)
            t1 = datetime.now()
            kmd.fit(bboxes)
            dt = datetime.now() - t1
            print("Total time taken for clustering {k} medoids: {0}min:{1}s"
                  .format(dt.seconds//60, dt.seconds % 60, k=k))
            medoids = kmd.centers_
            clustering.append({'n_clusters': k, 'medoids': medoids, 'class': cls})
    elif option == 'local':
        for k in range(indices, max_clusters+1):
            print(k, "clusters")
            curr_medoids, cluster_idxs = kMedoids(dist, k=k)
            medoids = []
            for m in curr_medoids:
                medoids.append(bboxes[m, :])
            clustering.append({'n_clusters': k, 'medoids': medoids, 'class': cls})
    return clustering
def arguments():
    """Parse command-line options for the clustering script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('dataset_path')
    # 3 is the category ID for cars
    arg_parser.add_argument('--cls', type=int, default=3,
                            help="Indicate which category of objects we are interested in")
    arg_parser.add_argument('--clustering', default='pyclustering',
                            choices=('pyclustering', 'pyclust', 'local'))
    return arg_parser.parse_args()
# def main():
# args = arguments()
#
# bboxes = get_class_data(cls=args.cls, dataset_path=args.dataset_path)
#
# clustering = compute_kmedoids(bboxes, args.cls, option=args.clustering)
#
# cluster_file = Path(args.dataset_path, 'clustering.jbl')
#
# joblib.dump(clustering, cluster_file, compress=5)
#
# ## For visualization
# # clusters = joblib.load('clustering.jbl')
# # draw_bboxes(clusters[25])
# #
# # for i in range(25, 36):
# # draw_bboxes(clusters[i])
# if __name__ == "__main__":
# main()
| 4,972 | 30.878205 | 117 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/k_medoids.py | import numpy as np
import warnings
def kMedoids(distances, k):
    """
    https://github.com/salspaugh/machine_learning/blob/master/clustering/kmedoids.py
    :param distances:
    :param k:
    :return:
    """
    n_points = distances.shape[0]
    medoid_idxs = np.random.choice(n_points, size=k, replace=False)
    old_medoids_idxs = np.zeros(k)

    # alternate assignment / medoid-update steps until the medoid set is stable
    # (no explicit max-iteration guard, matching the reference implementation)
    while not np.all(medoid_idxs == old_medoids_idxs):
        old_medoids_idxs = np.copy(medoid_idxs)
        cluster_idxs = get_cluster_indices(distances, medoid_idxs)
        medoid_idxs = update_medoids(distances, cluster_idxs, medoid_idxs)

    return medoid_idxs, cluster_idxs
def get_cluster_indices(distances, medoid_idxs):
    """Assign every point to the index of its nearest medoid."""
    return np.argmin(distances[medoid_idxs, :], axis=0)
def update_medoids(distances, cluster_idxs, medoid_idxs):
    """
    Re-pick the medoid of every cluster as the member minimising the total
    intra-cluster distance.  `medoid_idxs` is updated in place and returned.
    """
    for c in range(medoid_idxs.shape[0]):
        members = cluster_idxs == c
        if sum(members) == 0:
            warnings.warn("Cluster {} is empty!".format(c))
            continue

        # cost of keeping the current medoid
        curr_cost = np.sum(distances[medoid_idxs[c], members])

        # pairwise distances restricted to this cluster's members
        D_in = distances[members, :][:, members]

        # total cost if each member in turn were made the medoid
        all_costs = np.sum(D_in, axis=1)
        min_cost_idx = np.argmin(all_costs)
        min_cost = all_costs[min_cost_idx]

        # switch medoids only on a strict improvement
        if min_cost < curr_cost:
            medoid_idxs[c] = np.where(members)[0][min_cost_idx]

    return medoid_idxs
| 2,203 | 32.393939 | 93 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/metrics.py | import json
import warnings
import numpy as np
from tqdm import tqdm
def jaccard_index(box_a, box_b, indices=()):
    """
    Compute the Jaccard Index (Intersection over Union) of 2 boxes. Each box is (x1, y1, x2, y2).

    :param box_a:
    :param box_b:
    :param indices: The indices of box_a and box_b as [box_a_idx, box_b_idx].
        Kept for interface compatibility (was only used for debug prints).
    :return: IoU in [0, 1]; 0 when the boxes do not overlap or are degenerate.
    """
    # area of bounding boxes
    area_A = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_B = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])

    xA = max(box_a[0], box_b[0])
    yA = max(box_a[1], box_b[1])
    xB = min(box_a[2], box_b[2])
    yB = min(box_a[3], box_b[3])

    # BUG FIX: the original computed (xB - xA) * (yB - yA) directly, so two
    # negative extents (fully disjoint boxes) multiplied into a bogus positive
    # intersection. Clamp each extent at 0 instead.
    inter_w = max(0, xB - xA)
    inter_h = max(0, yB - yA)
    intersection = inter_w * inter_h

    union = area_A + area_B - intersection
    if union <= 0:
        # degenerate (zero-area) boxes: define IoU as 0 instead of dividing by zero
        return 0
    return intersection / union
def rect_dist(I, J):
    """
    Vectorised rectangle distance 1 - IoU, clipped to [0, 1].

    :param I: (N, 4) boxes as (x1, y1, x2, y2); a single 1-D box is promoted to (1, 4).
    :param J: boxes of matching shape, compared row-wise against I.
    :return: (N,) array of distances in [0, 1].
    """
    if len(I.shape) == 1:
        I = I[np.newaxis, :]
        J = J[np.newaxis, :]
    # area of boxes (+1: inclusive pixel coordinates)
    aI = (I[:, 2] - I[:, 0] + 1) * (I[:, 3] - I[:, 1] + 1)
    aJ = (J[:, 2] - J[:, 0] + 1) * (J[:, 3] - J[:, 1] + 1)
    # intersection rectangle
    x1 = np.maximum(I[:, 0], J[:, 0])
    y1 = np.maximum(I[:, 1], J[:, 1])
    x2 = np.minimum(I[:, 2], J[:, 2])
    y2 = np.minimum(I[:, 3], J[:, 3])
    # intersection area, zeroed where boxes do not strictly overlap
    aIJ = (x2 - x1 + 1) * (y2 - y1 + 1) * (np.logical_and(x2 > x1, y2 > y1))
    # promote divide warnings to errors so a zero union falls back to all-zero IoU
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            iou = aIJ / (aI + aJ - aIJ)
        except (RuntimeWarning, Exception):
            iou = np.zeros(aIJ.shape)
    # set NaN, inf, and -inf to 0
    iou[np.isnan(iou)] = 0
    iou[np.isinf(iou)] = 0
    # distance = 1 - IoU, clipped to [0, 1]
    dist = np.maximum(np.zeros(iou.shape), np.minimum(np.ones(iou.shape), 1 - iou))
    return dist
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is True, uses the VOC-07 11-point interpolation;
    otherwise the newer every-point (area-under-curve) metric.

    FIX: `use_07_metric` used to be accepted but silently ignored; it is now honored.
    The default (False) behaviour is unchanged.
    """
    if use_07_metric:
        # 11-point metric: average the maximum precision at
        # recall >= t for t in {0.0, 0.1, ..., 1.0}
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
        return ap

    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
def average_precision(confidence, dets, image_ids, class_recs, npos, ovthresh=0.5):
    """
    VOC-style average precision for a set of detections.

    :param confidence: (N,) detection scores.
    :param dets: (N, 4) detection boxes as (x1, y1, x2, y2).
    :param image_ids: per-detection image id, aligned with `confidence`/`dets`.
    :param class_recs: dict image_id -> {'bbox': GT boxes stored as (x, y, w, h),
        'det': per-GT matched flags}; mutated in place as GT boxes get matched.
    :param npos: total number of ground-truth boxes (denominator for recall).
    :param ovthresh: IoU threshold for a detection to count as a true positive.
    :return: (ap, precision array, recall array)
    """
    # rank detections by descending confidence
    sorted_ind = np.argsort(-confidence)
    BB = dets[sorted_ind, :]
    img_ids = [image_ids[x] for x in sorted_ind]

    nd = len(img_ids)  # num of detections
    tp = np.zeros(nd)
    fp = np.zeros(nd)

    for d in tqdm(range(nd), total=nd):
        R = class_recs[img_ids[d]]
        # FIX: np.float was removed in NumPy 1.24; use the builtin float
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)
        # GT boxes are stored as (x, y, w, h); convert (a copy) to (x1, y1, x2, y2)
        BBGT[:, 2] = BBGT[:, 0] + BBGT[:, 2] - 1
        BBGT[:, 3] = BBGT[:, 1] + BBGT[:, 3] - 1

        if BBGT.size > 0:
            # compute overlaps
            # intersection
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin, 0.0)
            ih = np.maximum(iymax - iymin, 0.0)
            inters = iw * ih

            # union
            uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
                   (BBGT[:, 2] - BBGT[:, 0]) * (BBGT[:, 3] - BBGT[:, 1]) -
                   inters)

            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not R['det'][jmax]:
                # first match for this GT box -> true positive
                tp[d] = 1.
                R['det'][jmax] = 1
            else:
                # duplicate detection of an already-matched GT box
                fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec)

    return ap, prec, rec
def compute_model_score(pred_file, gt_file, class_id=3):
    """
    Load ground truth (JSON) and detections (JSON-lines), then compute
    VOC average precision for one object class.

    :param pred_file: path to a JSON-lines file; each line has keys
        "id", "confidences" and "bboxes".
    :param gt_file: path to a JSON file of records with "image"->"id" and "bboxes".
    :param class_id: category id to evaluate (3 = cars by default).
    :return: (ap, precision array, recall array)
    """
    # load GT
    GT = json.load(open(gt_file))
    recs = {}
    for g in GT:
        recs[g["image"]["id"]] = g["bboxes"]
    class_recs = {}
    npos = 0
    for img_id in recs.keys():
        # get the list of all bboxes belonging to the particular class
        R = [obj for obj in recs[img_id] if obj["category_id"] == class_id]
        bboxes = np.array([x["bbox"] for x in R])
        det = [False] * len(R) # to record if this object has already been recorded
        npos = npos + len(R)
        class_recs[img_id] = {
            'bbox': bboxes,
            'det': det
        }
    print("Loaded GT")
    # Read the detections
    with open(pred_file) as f:
        preds = f.readlines()
    preds = [json.loads(x) for x in preds]
    # flatten per-image detection lists into parallel arrays
    confidence, BB, image_ids = [], [], []
    for x in tqdm(preds, total=len(preds)):
        confidence.extend(x["confidences"])
        BB.extend(x["bboxes"])
        image_ids.extend([x["id"]]*len(x['confidences']))
    print("Loaded detections")
    confidence = np.array(confidence)
    BB = np.array(BB)
    print(confidence.shape)
    print(BB.shape)
    ap, prec, rec = average_precision(confidence, BB, image_ids, class_recs, npos)
    return ap, prec, rec
| 5,812 | 27.777228 | 116 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/__init__.py | """Utils module"""
from . import metrics
from . import nms
from . import dense_overlap
from . import k_medoids
| 112 | 15.142857 | 27 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/test_metrics.py | import numpy as np
from scipy.io import loadmat
from .metrics import jaccard_index, rect_dist
def test_rect_dist(x, y, gt_dist):
    """Compare our rect_dist output against the MATLAB ground-truth distances."""
    computed = rect_dist(x, y)
    print("Is my rect_dist code correct?", np.array_equal(computed, gt_dist))
def main():
    """Load the MATLAB reference fixture and sanity-check rect_dist against it."""
    truth = loadmat('rect_dist.mat')
    # reference distances and the two box sets they were computed from
    gt_dist = truth['d'][:, 0]
    x = truth['labelRect']
    y = truth['tLabelRect']
    test_rect_dist(x, y, gt_dist)
    # spot-check a single pair as well
    print(rect_dist(x[0, :], y[0, :]))
if __name__ == "__main__":
    main()
| 476 | 20.681818 | 70 | py |
tiny-faces-pytorch | tiny-faces-pytorch-master/utils/visualize.py | from PIL import Image, ImageDraw, ImageFont
import json
from pathlib import Path
import numpy as np
def draw_bounding_box(img, bbox, labels):
    """
    Draw one box (x1, y1, x2, y2) on `img` in a random colour and write each
    labels entry as "key:value" below the top-left corner. Returns the image.
    """
    painter = ImageDraw.Draw(img)
    font = ImageFont.load_default()
    color = tuple(np.random.choice(range(100, 256), size=3))

    painter.rectangle((bbox[0], bbox[1], bbox[2], bbox[3]), outline=color)
    for i, key in enumerate(labels.keys()):
        # font height decides the vertical spacing between label lines
        _, line_h = font.getsize(labels[key])
        painter.text((bbox[0], bbox[1] + i * line_h), "{0}:{1:.3} ".format(key, labels[key]), fill=color)
    return img
def draw_all_boxes(img, bboxes, categories):
    """Overlay every (bbox, label-dict) pair on the image and display it."""
    for box, cat in zip(bboxes, categories):
        img = draw_bounding_box(img, box, cat)
    img.show()
def visualize_bboxes(image, bboxes):
    """
    :param image: PIL image
    :param bboxes: array of ground-truth boxes; each is drawn with its index as label
    :return:
    """
    print("Number of GT bboxes", bboxes.shape[0])
    for idx, box in enumerate(bboxes):
        rounded = np.round(np.array(box))
        image = draw_bounding_box(image, rounded, {"name": "{0}".format(idx)})
    image.show(title="BBoxes")
def render_and_save_bboxes(image, image_id, bboxes, scores, scales, directory="qualitative"):
    """
    Render the bboxes (annotated with score and scale) on the image and save
    it as <directory>/<image_id>.jpg.
    """
    for idx, box in enumerate(bboxes):
        rounded = np.round(np.array(box))
        image = draw_bounding_box(image, rounded, {'score': scores[idx], 'scale': scales[idx]})
    image.save("{0}/{1}.jpg".format(directory, image_id))
TSCP2 | TSCP2-main/src/main.py | import sys
import numpy as np
import argparse
import os
import tensorflow as tf
import matplotlib.pyplot as plt
# dataset
import TSCP2 as cp2
import losses as ls
from utils.DataHelper import load_dataset
from utils.estimate_CPD import estimate_CPs
# Command-line interface: dataset/loss/similarity selection plus the training
# hyper-parameters that are swept during grid search.
parser = argparse.ArgumentParser(description='interface of running experiments for TSCP2 baselines')
parser.add_argument('--datapath', type=str, required=True, help='[ ./data ] prefix path to data directory')
parser.add_argument('--output', type=str, required=True, help='[ ./output ] prefix path to output directory')
parser.add_argument('--dataset', type=str, default='HASC', help='dataset name ')
parser.add_argument('--loss', type=str, default='nce', help='loss function ')
parser.add_argument('--sim', type=str, default='cosine', help='similarity metric ')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
# hyperparameters for grid search
#parser.add_argument('--window', type=int, nargs='+', default=100, help='window size')
parser.add_argument('--window', type=int, default=100, help='window size')
parser.add_argument('--code', type=int, default=10, help='size of encoded features')
parser.add_argument('--beta', type=float, default=1, help='parameter for FN loss function or threshold for FC loss function')
parser.add_argument('--epoch', type=int, default=100, help='max iteration for training')
parser.add_argument('--batch', type=int, default=64, help='batch_size for training')
parser.add_argument('--eval_freq', type=int, default=25, help='evaluation frequency per batch updates')
parser.add_argument('--temp', type=float, default=.1, help='Temperature parameter for NCE loss function')
parser.add_argument('--tau', type=float, default=.1, help='parameter for Debiased contrastive loss function')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
# sanity check
args = parser.parse_args()
# Create the output directory layout: <output>/{plots,pred_sim,model} and
# <output>/<dataset>/{plots,pred_sim,model}.
# FIX: the original nested-mkdir logic crashed with FileExistsError whenever
# <output> already existed but <output>/<dataset> did not (the inner mkdirs
# were skipped only when <output> itself was missing). makedirs(exist_ok=True)
# is idempotent and creates exactly the same tree.
for _base_dir in (args.output, os.path.join(args.output, args.dataset)):
    os.makedirs(_base_dir, exist_ok=True)
    for _sub_dir in ("plots", "pred_sim", "model"):
        os.makedirs(os.path.join(_base_dir, _sub_dir), exist_ok=True)
# Unpack CLI arguments into module-level constants used throughout the script.
DATA_PATH = args.datapath
OUTPUT_PATH = os.path.join(args.output,args.dataset)
MODEL_PATH = os.path.join(args.output, "model")
DS_NAME = args.dataset
LOSS = args.loss
SIM = args.sim
GPU = args.gpu
WIN = args.window
CODE_SIZE = args.code
BATCH_SIZE = args.batch
EPOCHS = args.epoch
LR = args.lr
TEMP = args.temp
TAU = args.tau
BETA = args.beta
EVALFREQ = args.eval_freq
# scale the epoch budget with the batch size (baseline batch size is 4)
EPOCHS = EPOCHS * int(BATCH_SIZE / 4)
criterion = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.SUM)
# NOTE(review): lr_decayed_fn is built but never passed to the optimizer below —
# the Adam optimizer uses the constant LR; confirm whether decay was intended.
decay_steps = 1000
lr_decayed_fn = tf.keras.experimental.CosineDecay(
    initial_learning_rate=LR, decay_steps=decay_steps)
optimizer = tf.keras.optimizers.Adam(lr=LR)
# run name encodes every hyper-parameter so output files are self-describing
train_name = "CP2_model_" + DS_NAME + "_T" + str(TEMP) + "_WIN" + str(WIN) + \
             "_BS" + str(BATCH_SIZE) + "_CS" + str(CODE_SIZE) + "_lr" + str(LR) + \
             "_LOSS" + LOSS + "_SIM" + SIM + "_TAU" + str(TAU) + "_BETA" + str(BETA)
print("------------------------------------>>> " + train_name)
# -------------------------------
# 1 PREPARE DATASET
# -------------------------------
train_ds = load_dataset(DATA_PATH, DS_NAME, WIN, BATCH_SIZE, mode = "train")
# ------------------------
# 2 TRAINING
# ------------------------
prep_model = cp2.get_TCN_encoder((WIN,1), int(WIN / 2), CODE_SIZE)
# pick the pairwise (dim2) similarity used inside the contrastive loss
# NOTE(review): an unrecognised --sim value leaves `similarity` undefined and
# the train_prep call below would raise NameError — consider validating SIM.
if SIM == "cosine":
    similarity = ls._cosine_simililarity_dim2
# elif sim_fn == "dtw":
#    similarity = DTW
elif SIM == "euclidean":
    similarity = ls._euclidean_similarity_dim2
elif SIM == "edit":
    similarity = ls._edit_similarity_dim2
elif SIM == "nwarp":
    similarity = ls._neural_warp_dim2
epoch_wise_loss, epoch_wise_sim, epoch_wise_neg, prep_model = cp2.train_prep(prep_model, train_ds, OUTPUT_PATH, optimizer,
                                                             criterion, train_name, WIN, temperature=TEMP,
                                                             epochs=EPOCHS, sfn=similarity, lfn=LOSS, beta=BETA, tau= TAU)
# SAVE MODEL and Learning Progress plot
#with plt.xkcd():
splot=1
if splot ==1:
    fig, (ax1, ax2) = plt.subplots(2)
    fig.suptitle(train_name)
    ax1.plot(epoch_wise_loss, label="Loss")
    ax2.plot(epoch_wise_sim, label="Positive pairs")
    ax2.plot(epoch_wise_neg, label="Negative pairs")
    plt.legend()
    plt.savefig(os.path.join(OUTPUT_PATH,"plots", LOSS+"__"+train_name + "_LOSS.png"))
    print("Learning progress plot saved!")
# -------------------------
# 4 TEST SET & SEGMENTATION
# -------------------------
test_ds = load_dataset(DATA_PATH, DS_NAME, WIN, BATCH_SIZE, mode = "test")
# column 0 is the label; the remaining 2*WIN columns are history+future windows
x_test, lbl_test = test_ds[:,1:], test_ds[:,0]
num = x_test.shape[0]
lbl_test = np.array(lbl_test).reshape((lbl_test.shape[0], 1))
# encode history (first WIN samples) and future (last WIN samples) separately
history = prep_model(x_test[:, 0:WIN].reshape((num, 1, WIN)))
future = prep_model(x_test[:, WIN:].reshape((num, 1, WIN)))
pred_out = np.concatenate((lbl_test, history, future), 1)
# per-pair representation similarity: low similarity suggests a change point
rep_sim = ls._cosine_simililarity_dim1(history, future)
np.savetxt(os.path.join(OUTPUT_PATH, "pred_sim", train_name + "_pred_sim.csv"), np.concatenate((lbl_test, np.array(rep_sim).reshape((rep_sim.shape[0],1))),1), delimiter=',',
           header="lbl,"+LOSS, comments="")
print("Saved test similarity result!")
print('Average similarity for test set : Reps : {}'.format(np.mean(rep_sim)))
# ground truth: a window pair is "change" when the labelled CP falls inside
# the central 70% of the combined 2*WIN window
gt = np.zeros(lbl_test.shape[0])
gt[np.where((lbl_test > int(2 * WIN * 0.15)) & (lbl_test < int(2 * WIN * 0.85)))[0]] = 1
# threshold_segmentation(h_pred,f_pred, gt, train_name, os.path.join(OUT_PATH,"Evaluation.txt"), threshold = np.mean(rep_sim) - np.std(rep_sim))
result = estimate_CPs(rep_sim, gt, os.path.join(OUTPUT_PATH, train_name),
                      os.path.join(OUTPUT_PATH, "Evaluation.txt"),
                      metric='cosine', threshold=epoch_wise_sim[-1] - ((epoch_wise_sim[-1]-epoch_wise_neg[-1])/3))
# append one CSV row of hyper-parameters + final metrics for this run
with open(os.path.join(OUTPUT_PATH, "Evaluation2.txt"), "a") as out_file:
    out_file.write(str(BATCH_SIZE) + "," + str(WIN) + "," + str(CODE_SIZE) + "," + str(TEMP) + "," + str(
        LR) + "," + str(np.mean(epoch_wise_loss))+ ","+str(epoch_wise_sim[-1]) + "," +str(epoch_wise_neg[-1])+","+result)
    out_file.close()
print("Saved model to disk")
# -------------------------
# 3 SAVE THE MODEL
# -------------------------
prep_model.save_weights(os.path.join(MODEL_PATH, train_name + ".tf"))
model_json = prep_model.to_json()
with open(os.path.join(MODEL_PATH, train_name + ".json"), "w") as json_file:
    json_file.write(model_json)
    json_file.close()
print("Saved model to disk")
print("Saved model to disk") | 6,984 | 43.775641 | 173 | py |
TSCP2 | TSCP2-main/src/losses.py | from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
# Keras CosineSimilarity is a *loss* (it returns the negative cosine
# similarity) with per-sample reduction; the _cosine_simililarity_* helpers
# below negate it back. axis=1 compares matching rows; axis=2 compares
# broadcast (N, 1, C) vs (1, M, C) pairs.
cosine_sim_1d = tf.keras.losses.CosineSimilarity(axis=1, reduction=tf.keras.losses.Reduction.NONE)
cosine_sim_2d = tf.keras.losses.CosineSimilarity(axis=2, reduction=tf.keras.losses.Reduction.NONE)
def _cosine_simililarity_dim1(x, y):
    """Row-wise cosine similarity between matching rows of x and y."""
    # the Keras loss returns *negative* similarity, so flip the sign back
    return -cosine_sim_1d(x, y)
def _cosine_simililarity_dim2(x, y):
    """All-pairs cosine similarity: (N, C) x (M, C) -> (N, M)."""
    expanded_x = tf.expand_dims(x, 1)  # (N, 1, C)
    expanded_y = tf.expand_dims(y, 0)  # (1, M, C)
    # the Keras loss returns *negative* similarity, so flip the sign back
    return -cosine_sim_2d(expanded_x, expanded_y)
def _dot_simililarity_dim1(x, y):
    """Row-wise dot product: (N, C) x (N, C) -> (N, 1, 1)."""
    # batched matmul of (N, 1, C) with (N, C, 1)
    return tf.matmul(tf.expand_dims(x, 1), tf.expand_dims(y, 2))
def _dot_simililarity_dim2(x, y):
    """All-pairs dot product: (N, C) x (M, C) -> (N, M)."""
    # tensordot of (N, 1, C) with (1, C, M) over the last two/first two axes
    return tf.tensordot(tf.expand_dims(x, 1), tf.expand_dims(tf.transpose(y), 0), axes=2)
def _euclidean_similarity_dim1(x, y):
    """Row-wise similarity 1 / (1 + euclidean_distance(x, y))."""
    dist = tf.sqrt(tf.reduce_sum(tf.square(x - y), axis=-1))
    return 1 / (1 + dist)
def _euclidean_similarity_dim2(x, y):
    """All-pairs similarity 1 / (1 + euclidean distance) between rows of x and y."""
    # broadcast (N, 1, C) against (1, M, C) to get every pairwise difference
    diff = tf.expand_dims(x, 1) - tf.expand_dims(y, 0)
    dist = tf.sqrt(tf.reduce_sum(tf.square(diff), axis=2))
    return 1 / (1 + dist)
def _edit_similarity_dim1(x, y):
    # NOTE(review): despite the name, this is byte-for-byte identical to
    # _euclidean_similarity_dim1 — edit-distance similarity is not implemented.
    # Kept so the "edit" option still runs; confirm whether a real edit
    # similarity was intended here.
    # x shape: (N, 1, C)
    # y shape: (N, 1, C)
    # v shape: (N, 1, 1)
    d = tf.sqrt(tf.reduce_sum(tf.square(x - y), axis=-1))
    s = 1 / ( 1 + d )
    return s
def _edit_similarity_dim2(x, y):
    # NOTE(review): identical to _euclidean_similarity_dim2 — edit-distance
    # similarity is not implemented; kept so the "edit" option still runs.
    # x shape: (N, 1, C)
    # y shape: (1, N, C)
    # v shape: (N, 1, 1)
    x1 = tf.expand_dims(x,1)
    y1 = tf.expand_dims(y,0)
    d = tf.sqrt(tf.reduce_sum(tf.square(x1 - y1), axis=2))
    s = 1 / (1 + d)
    return s
def loss_fn(history, future, similarity, loss_fn="nce", temperature=0.1, tau=0.1, beta=0.1, elimination_th=0, elimination_topk=0.1, attraction=False):
    """
    Dispatch to the selected contrastive loss.

    :param history: (N, C) history-window embeddings.
    :param future: (N, C) future-window embeddings (row i positive for row i).
    :param similarity: callable producing the (N, N) similarity matrix.
    :param loss_fn: one of "nce", "dcl", "fc", "harddcl".
    :return: (loss, mean positive similarity, mean negative similarity)
    :raises ValueError: on an unknown `loss_fn` name.
    """
    if loss_fn == "nce":
        loss, pos, neg = nce_loss_fn(history, future, similarity, temperature)
    elif loss_fn == "dcl":
        loss, pos, neg = dcl_loss_fn(history, future, similarity, temperature, debiased=True, tau_plus=tau)
    elif loss_fn == "fc":
        # NOTE: fc historically takes its top-k fraction from `beta`, not `elimination_topk`
        loss, pos, neg = fc_loss_fn(history, future, similarity, temperature, elimination_th=elimination_th, elimination_topk=beta, attraction=attraction)
    elif loss_fn == "harddcl":
        loss, pos, neg = hard_loss_fn(history, future, similarity, temperature, beta=beta, debiased=True, tau_plus=tau)
    else:
        # FIX: previously an unknown name fell through and raised UnboundLocalError
        raise ValueError("Unknown loss function: {}".format(loss_fn))
    return loss, pos, neg
def nce_loss_fn(history, future, similarity, temperature=0.1):
    """
    InfoNCE contrastive loss over a batch of (history, future) embedding pairs.

    :param history: (N, C) embeddings of history windows.
    :param future: (N, C) embeddings; row i is the positive for history row i.
    :param similarity: callable producing the (N, N) similarity matrix.
    :param temperature: softmax temperature.
        (FIX: the default used to be the *string* '0.1', which would crash on division.)
    :return: (loss, mean positive similarity, mean negative similarity)
    """
    criterion = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.SUM)
    N = history.shape[0]
    sim = similarity(history, future)
    # diagonal entries are the positive pairs
    pos_sim = K.exp(tf.linalg.tensor_diag_part(sim) / temperature)

    # off-diagonal mask selects the negatives
    # (FIX: np.bool was removed in NumPy 1.24 -> use the builtin bool)
    tri_mask = np.ones(N ** 2, dtype=bool).reshape(N, N)
    tri_mask[np.diag_indices(N)] = False
    neg = tf.reshape(tf.boolean_mask(sim, tri_mask), [N, N - 1])

    all_sim = K.exp(sim / temperature)
    logits = tf.divide(K.sum(pos_sim), K.sum(all_sim, axis=1))

    lbl = np.ones(history.shape[0])
    # categorical cross entropy
    loss = criterion(y_pred=logits, y_true=lbl)

    # similarity of positive / negative pairs (only for debug)
    mean_sim = K.mean(tf.linalg.tensor_diag_part(sim))
    mean_neg = K.mean(neg)
    return loss, mean_sim, mean_neg
def dcl_loss_fn(history, future, similarity, temperature=0.1, debiased=True, tau_plus=0.1):
    """
    Debiased contrastive loss, from https://github.com/chingyaoc/DCL/

    :param temperature: temperature scaling.
        (FIX: the default used to be the *string* '0.1', which would crash on division.)
    :param debiased: apply the debiased estimator g() instead of the plain sum.
    :param tau_plus: class-prior probability that a "negative" is actually positive.
    :return: (loss, mean positive similarity, mean negative similarity)
    """
    N = history.shape[0]
    sim = similarity(history, future)
    pos_sim = K.exp(tf.linalg.tensor_diag_part(sim) / temperature)

    # off-diagonal mask (FIX: np.bool removed in NumPy 1.24 -> builtin bool)
    tri_mask = np.ones(N ** 2, dtype=bool).reshape(N, N)
    tri_mask[np.diag_indices(N)] = False
    neg = tf.reshape(tf.boolean_mask(sim, tri_mask), [N, N - 1])
    neg_sim = K.exp(neg / temperature)

    # estimator g()
    if debiased:
        N = N - 1
        Ng = (-tau_plus * N * pos_sim + K.sum(neg_sim, axis=-1)) / (1 - tau_plus)
        # constrain (optional)
        Ng = tf.clip_by_value(Ng, clip_value_min=N * np.e ** (-1 / temperature), clip_value_max=tf.float32.max)
    else:
        Ng = K.sum(neg_sim, axis=-1)

    # contrastive loss
    loss = K.mean(- tf.math.log(pos_sim / (pos_sim + Ng)))

    # similarity of positive pairs (only for debug)
    mean_sim = K.mean(tf.linalg.tensor_diag_part(sim))
    mean_neg = K.mean(neg)
    return loss, mean_sim, mean_neg
def fc_loss_fn(history, future, similarity, temperature=0.1, elimination_th=0, elimination_topk=0.1, attraction=False):
    """
    False-negative-cancellation contrastive loss: the most similar negatives
    (suspected false negatives) are removed, either above a similarity
    threshold (`elimination_th` > 0) or as a top-k fraction (`elimination_topk`).

    :param elimination_topk: fraction (capped at 0.5) of negatives to discard.
    :param attraction: accepted for interface compatibility; not used here.
    :return: (loss, mean positive similarity, mean negative similarity)
    """
    N = history.shape[0]
    if elimination_topk > 0.5:
        elimination_topk = 0.5
    # FIX: np.math.ceil is a deprecated alias; np.ceil + int() is equivalent here
    elimination_topk = int(np.ceil(elimination_topk * N))

    sim = similarity(history, future) / temperature
    pos_sim = K.exp(tf.linalg.tensor_diag_part(sim))

    # off-diagonal mask (FIX: np.bool removed in NumPy 1.24 -> builtin bool)
    tri_mask = np.ones(N ** 2, dtype=bool).reshape(N, N)
    tri_mask[np.diag_indices(N)] = False
    neg_sim = tf.reshape(tf.boolean_mask(sim, tri_mask), [N, N - 1])
    sorted_sim = tf.sort(neg_sim, axis=1)

    if elimination_th > 0:
        # Threshold-based cancellation: keep only negatives below the threshold
        threshold = tf.constant([elimination_th])
        mask = tf.cast(tf.math.greater(threshold, sorted_sim), tf.float32)
        neg_count = tf.reduce_sum(mask, axis=1)
        neg = tf.divide(tf.reduce_sum(sorted_sim * mask, axis=1), neg_count)
        neg_sim = tf.reduce_sum(K.exp(sorted_sim / temperature) * mask, axis=1)
    else:
        # Top-K cancellation: drop the k most similar (last-sorted) negatives
        if elimination_topk == 0:
            elimination_topk = 1
        tri_mask = np.ones(N * (N - 1), dtype=bool).reshape(N, N - 1)
        tri_mask[:, -elimination_topk:] = False
        neg = tf.reshape(tf.boolean_mask(sorted_sim, tri_mask), [N, N - elimination_topk - 1])
        neg_sim = K.sum(K.exp(neg), axis=1)

    loss = K.mean(- tf.math.log(pos_sim / (pos_sim + neg_sim)))

    # similarity of positive / negative pairs (debug only; undo the temperature scaling)
    mean_sim = K.mean(tf.linalg.tensor_diag_part(sim)) * temperature
    mean_neg = K.mean(neg) * temperature
    return loss, mean_sim, mean_neg
def hard_loss_fn(history, future, similarity, temperature, beta=0, debiased=True, tau_plus=0.1):
    """
    Contrastive loss with hard-negative reweighting (ICLR 2021,
    "Contrastive Learning with Hard Negative Samples"):
    https://www.groundai.com/project/contrastive-learning-with-hard-negative-samples

    reweight = (beta * neg) / neg.mean()
    Neg = max((-N * tau_plus * pos + reweight * neg).sum() / (1 - tau_plus), e ** (-1 / t))
    hard_loss = -log(pos.sum() / (pos.sum() + Neg))

    :param beta: hard-negative concentration; 0 disables reweighting.
    :param tau_plus: class-prior debiasing term.
    :return: (loss, mean positive similarity, mean negative similarity)
    """
    N = history.shape[0]
    sim = similarity(history, future)
    pos_sim = K.exp(tf.linalg.tensor_diag_part(sim) / temperature)

    # off-diagonal mask (FIX: np.bool removed in NumPy 1.24 -> builtin bool)
    tri_mask = np.ones(N ** 2, dtype=bool).reshape(N, N)
    tri_mask[np.diag_indices(N)] = False
    neg = tf.reshape(tf.boolean_mask(sim, tri_mask), [N, N - 1])
    neg_sim = K.exp(neg / temperature)

    # up-weight each negative in proportion to its similarity (hardness)
    reweight = (beta * neg_sim) / tf.reshape(tf.reduce_mean(neg_sim, axis=1), [-1, 1])
    if beta == 0:
        reweight = 1

    # estimator g()
    if debiased:
        N = N - 1
        Ng = (-tau_plus * N * pos_sim + tf.reduce_sum(reweight * neg_sim, axis=-1)) / (1 - tau_plus)
        # constrain (optional)
        Ng = tf.clip_by_value(Ng, clip_value_min=np.e ** (-1 / temperature), clip_value_max=tf.float32.max)
    else:
        Ng = K.sum(neg_sim, axis=-1)

    # contrastive loss
    loss = K.mean(-tf.math.log(pos_sim / (pos_sim + Ng)))

    # similarity of positive pairs (only for debug)
    mean_sim = K.mean(tf.linalg.tensor_diag_part(sim))
    mean_neg = K.mean(neg)
    return loss, mean_sim, mean_neg
def _neural_warp_dim2():
loss, mean_sim, mean_neg = 0, 0, 0
return loss, mean_sim, mean_neg | 8,998 | 36.032922 | 161 | py |
TSCP2 | TSCP2-main/src/TSCP2.py | import os
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tqdm import tqdm
import tensorflow as tf
from tcn import TCN
import numpy as np
import losses as ls
from utils.usc_ds_helper import ts_samples
#@tf.function
def train_step(xis, xjs, amodel, optimizer, criterion, temperature, sfn, lfn, beta, tau):
    """
    One contrastive optimisation step on a batch of (history, future) windows.

    :param xis: batch of history windows fed to the encoder.
    :param xjs: batch of the corresponding future windows.
    :param amodel: the encoder; its trainable variables are updated in place.
    :param criterion: passed through but unused here (loss comes from ls.loss_fn).
    :param sfn: similarity function; lfn: loss-function name for ls.loss_fn.
    :return: (loss, mean positive similarity, mean negative similarity)
    """
    # print("---------",xis.shape)
    with tf.GradientTape() as tape:
        zis = amodel(xis)
        zjs = amodel(xjs)
        # normalize projection feature vectors
        zis = tf.math.l2_normalize(zis, axis=1)
        zjs = tf.math.l2_normalize(zjs, axis=1)
        #loss, mean_sim = ls.dcl_loss_fn(zis, zjs, temperature, lfn)
        loss, mean_sim, neg_sim = ls.loss_fn(zis, zjs, similarity = sfn, loss_fn = lfn, temperature=temperature, tau = tau, beta = beta, elimination_th = 0, attraction = False)
    # backprop through the encoder only
    gradients = tape.gradient(loss, amodel.trainable_variables)
    optimizer.apply_gradients(zip(gradients, amodel.trainable_variables))
    return loss, mean_sim, neg_sim
def train_prep(model, dataset, outpath, optimizer, criterion, train_name, win, temperature=0.1, epochs=100, sfn="cosine", lfn='nce', beta=0.1, tau=0.1):
    """Contrastive pre-training loop.

    Per epoch: split each batch row into (history, future) windows via
    ts_samples, run train_step on the pair, append the epoch means of
    loss/pos-sim/neg-sim, and log them to <outpath>/<train_name>.txt.
    Early-stops after 4 consecutive epochs in which the loss stalls
    (|delta| < 1e-4) or worsens.

    Returns (epoch losses, epoch pos-similarities, epoch neg-similarities, model).
    """
    beta_curr = beta
    epoch_wise_loss = []
    epoch_wise_sim = []
    epoch_wise_neg = []
    end_condition = 0
    for epoch in tqdm(range(epochs)):
        counter = 0
        step_wise_loss = []
        step_wise_sim = []
        step_wise_neg = []
        for mbatch in dataset:
            counter += 1
            # Each row is [label, history-window, future-window].
            a, b, lbl = ts_samples(mbatch, win)
            # a = data_augmentation(mbatch)
            # b = data_augmentation(mbatch)
            # expand_dims adds the single channel dimension the encoder expects.
            loss, sim, neg = train_step(tf.expand_dims(a, axis=2), tf.expand_dims(b, axis=2), model, optimizer, criterion,
                                        temperature, sfn, lfn, beta=beta_curr, tau=tau)
            step_wise_loss.append(loss)
            step_wise_sim.append(sim)
            step_wise_neg.append(neg)

        epoch_wise_loss.append(np.mean(step_wise_loss))
        epoch_wise_sim.append(np.mean(step_wise_sim))
        epoch_wise_neg.append(np.mean(step_wise_neg))
        # wandb.log({"nt_INFONCEloss": np.mean(step_wise_loss)})
        # wandb.log({"nt_sim": np.mean(step_wise_sim)})
        #if epoch % (np.floor(epoch / 10)) == 0:
        #    beta_curr = beta_curr - (beta/10)
        if epoch % 1 == 0:
            result = "epoch: {} (step:{}) -loss: {:.3f} - avg rep sim : {:.3f} - avg rep neg : {:.3f}\n".format(epoch + 1,
                                                                                                               counter,
                                                                                                               np.mean(step_wise_loss),
                                                                                                               np.mean(step_wise_sim),
                                                                                                               np.mean(step_wise_neg))
            # Append this epoch's means as a CSV row for later analysis.
            with open(os.path.join(outpath, train_name + ".txt"), "a") as myfile:
                myfile.write("{:.4f},{:.4f},{:.4f}\n".format(np.mean(step_wise_loss),
                                                             np.mean(step_wise_sim),
                                                             np.mean(step_wise_neg)))
                myfile.close()
            print(result)
        if epoch > 5:
            # Early stopping: count epochs where the loss stalls or worsens.
            if np.abs(epoch_wise_loss[-1] - epoch_wise_loss[-2]) < 0.0001 or epoch_wise_loss[-2] < epoch_wise_loss[-1]:
                end_condition += 1
            else:
                end_condition = 0
            if end_condition == 4:
                return epoch_wise_loss, epoch_wise_sim, epoch_wise_neg, model

    return epoch_wise_loss, epoch_wise_sim, epoch_wise_neg, model
# Dilated-TCN
def get_TCN_encoder(f_shape, n_steps, code_size, nb_filters=64, kernel_size=4, nb_stacks=2, dilations=[1, 2, 4, 8],
                    padding='causal',
                    use_skip_connections=True, dropout_rate=0, activation='relu'):
    """Build the dilated-TCN encoder: TCN trunk + MLP head down to `code_size`.

    NOTE(review): the mutable default `dilations=[1, 2, 4, 8]` is shared across
    calls (never mutated here, so currently harmless). The `activation`
    argument is accepted but the TCN is built with a hard-coded 'relu' —
    confirm intent.
    """
    inputs = Input(f_shape)
    xrep = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
               use_skip_connections, dropout_rate, return_sequences=True, activation='relu',
               kernel_initializer='random_normal', use_batch_norm=True)(inputs)
    # base_model.trainable = True

    xrep = Flatten()(xrep)
    # Projection head: 2*n_steps -> n_steps -> code_size.
    xrep = Dense(2 * n_steps)(xrep)
    xrep = Activation("relu")(xrep)
    xrep = Dense(n_steps)(xrep)
    xrep = Activation("relu")(xrep)
    xrep = Dense(code_size)(xrep)
    # xrep_dtcn.append(xrep)

    # encoded = concatenate(xrep_dtcn, axis=-1)
    # encoded = BatchNormalization()(xrep)

    encoder = Model(inputs, xrep)

    return encoder
| 4,665 | 41.036036 | 176 | py |
TSCP2 | TSCP2-main/src/helpers.py | import tensorflow as tf
import numpy as np
#from augmentation.gaussian_filter import GaussianBlur
def get_mask(batch_size):
    # Boolean (batch_size, batch_size) matrix that is True only on the
    # diagonal, i.e. it selects each sample's similarity with itself.
    # (The original comment here was copied from get_negative_mask and
    # described the opposite selection.)
    diag_only = np.eye(batch_size, dtype=bool)
    return tf.constant(diag_only)
def get_negative_mask(batch_size):
    # Boolean (batch_size, 2*batch_size) mask that is False at (i, i) and
    # (i, i + batch_size): each sample's similarity with itself and with its
    # own augmented view is excluded, so only distinct pairs remain as
    # negative examples.
    mask = np.ones((batch_size, 2 * batch_size), dtype=bool)
    rows = np.arange(batch_size)
    mask[rows, rows] = False
    mask[rows, rows + batch_size] = False
    return tf.constant(mask)
| 1,000 | 39.04 | 89 | py |
TSCP2 | TSCP2-main/src/utils/wsdm_ds_helper.py | import numpy as np
from pandas import read_csv
import pandas as pd
import csv
import os
def ts_samples(mbatch, win):
    """Split batch rows [label | history(win) | ... | future(win)] into
    (history, future, label) arrays."""
    labels = mbatch[:, 0]
    history = mbatch[:, 1:win + 1]
    future = mbatch[:, -win:]
    return history, future, labels
def load_wsdm_ds(path, window, mode='train', part=1):
    """Window the WISDM data and return (X, lbl) for the requested split.

    mode='all': every window; mode='train': only windows without a change
    point (lbl == 0); otherwise a shuffled test set mixing change-point
    windows with the leftover clean ones.
    """
    save_path = os.path.join("../data/slidingwindow")
    """
    print(os.getcwd())
    xfile_name = os.path.join(save_path,'norm_A4_all_window_label' + str(window) + ".csv")
    lblfile_name = os.path.join(save_path, 'norm_A4_all_window_label' + str(window) + ".csv")
    if os.path.isfile(xfile_name) == True:
        X = read_csv(xfile_name, header=None).values
        lbl = read_csv(lblfile_name, header=None).values
    else:
    """
    X, lbl = extract_windows(path, window, mode, part=part)
    if mode == "all":
        return X, lbl
    # Shuffle windows and labels together.
    idx = np.arange(X.shape[0])
    np.random.shuffle(idx)
    X = X[idx,]
    lbl = lbl[idx]
    # lbl == 0 marks windows with no change point ("pure" samples).
    id = np.where(lbl==0)[0]
    num_neg = X.shape[0] - id.size
    num_train = id.size - num_neg
    trainx = X[id]
    trainlbl = lbl[id]
    print('pure samples : ', id.size)
    if mode == 'train':
        return trainx[:num_train, ], trainlbl[:num_train, ]
    else:
        # Keep change points that fall well inside the window (10%-90%).
        neg_id = np.where((lbl > int(window*0.1)) & (lbl < int(window*0.9)))[0]
        # print('neg samples : ', neg_id.size)
        # NOTE(review): np.vstack on 1-D label slices of different lengths
        # would fail — presumably lbl is 2-D here; confirm extract_windows.
        testx = np.vstack((X[neg_id], trainx[num_train:, ]))
        testlbl = np.vstack((lbl[neg_id], trainlbl[num_train:, ]))
        # print('neg shape : ', testx.shape)
        idx = np.arange(testx.shape[0])
        np.random.shuffle(idx)
        testx = testx[idx,]
        testlbl = testlbl[idx,]
        # print('2neg shape : ', testx.shape)
        return testx, testlbl
def extract_windows(path, window_size, mode="train", part=1):
    """Return (windows, lbl) for the WISDM series.

    NOTE(review): delegates to load_WISDM_dataset, which is not imported in
    this module — confirm where it is defined.
    """
    #files = os.scandir(path)
    window_size  # NOTE(review): no-op expression statement; presumably leftover.
    windows = []
    lbl = []
    first = True
    num_cp = 0
    X, C= load_WISDM_dataset(path, mode, win=window_size, overlap=0.9, part=part)
    #X = np.concatenate((X,Y), axis=-1)
    windows = X
    lbl = C
    """
    for f in files:
        if f.name != 'A4Benchmark_all.csv':
            data = pd.read_csv(f)
            cp = data['changepoint']
            ts = remove_anomalies(data['value'].values, data['anomaly'])
            ts = (ts - np.min(ts)) / (np.max(ts) - np.min(ts))
            # ts = ts.values
            for i in range(0, ts.shape[0] - window_size, step):
                windows.append(np.array(ts[i:i + window_size]))
                # print("TS",ts[i:i+window_size])
                is_cp = np.where(cp[i:i + window_size] == 1)[0]
                if is_cp.size == 0:
                    is_cp = [0]
                else:
                    num_cp += 1
                lbl.append(is_cp[0])
                # print(is_cp)
            first = False
    print("number of samples : {} / number of samples with change point : {}".format(len(windows), num_cp))
    windows = np.array(windows)
    if save:
        outfile = open(os.path.join(save_path, 'norm_A4_all_window' + str(window_size) + ".csv"), 'w', newline='')
        writer = csv.writer(outfile)
        writer.writerows(windows)
        outfile.close()
        outfile = open(os.path.join(save_path, 'norm_A4_all_window_label' + str(window_size) + ".csv"), 'w', newline='')
        writer = csv.writer(outfile)
        writer.writerows(np.array(lbl))
        outfile.close()
    """
    return windows, np.array(lbl)
TSCP2 | TSCP2-main/src/utils/DataHelper.py | import numpy as np
import tensorflow as tf
from .hasc_helper import load_hasc_ds
from .usc_ds_helper import load_usc_ds
def load_dataset(path, ds_name, win, bs, mode="train"):
    """Load windows for the named dataset and pack each row as [label | series].

    Returns the raw numpy matrix for mode == "test", otherwise a batched,
    prefetching tf.data pipeline.
    """
    if ds_name == 'HASC':
        windows, labels = load_hasc_ds(path, window=2 * win, mode=mode)
    elif ds_name == "USC":
        windows, labels = load_usc_ds(path, window=2 * win, mode=mode)
    else:
        raise ValueError("Undefined Dataset.")

    labels = labels.reshape((labels.shape[0], 1))
    print(windows.shape, labels.shape)
    dataset = np.concatenate((labels, windows), 1)
    print("dataset shape : ", dataset.shape)
    if mode == "test":
        return dataset

    # Create TensorFlow dataset
    pipeline = tf.data.Dataset.from_tensor_slices(dataset)
    pipeline = pipeline.batch(bs, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)
    return pipeline
| 883 | 34.36 | 96 | py |
TSCP2 | TSCP2-main/src/utils/FastDTW.py | #!/usr/bin/env python
# coding: utf-8
# In[10]:
from __future__ import absolute_import, division
import numbers
import numpy as np
from collections import defaultdict
def __reduce_by_half(x):
    """Coarsen a series by averaging consecutive pairs (a trailing odd
    element is dropped)."""
    usable = len(x) - len(x) % 2
    return [(x[i] + x[i + 1]) / 2 for i in range(0, usable, 2)]
# In[11]:
def __expand_window(path, len_x, len_y, radius):
    """Project a coarse warp path to the next resolution and dilate it.

    Each coarse cell is grown by `radius` in every direction, mapped to its
    four children at double resolution, and the in-bounds cells are returned
    in row-major order.
    """
    dilated = set(path)
    for i, j in path:
        for di in range(-radius, radius + 1):
            for dj in range(-radius, radius + 1):
                dilated.add((i + di, j + dj))

    upsampled = set()
    for i, j in dilated:
        upsampled.update(((i * 2, j * 2), (i * 2, j * 2 + 1),
                          (i * 2 + 1, j * 2), (i * 2 + 1, j * 2 + 1)))

    window = []
    start_j = 0
    for i in range(len_x):
        new_start_j = None
        for j in range(start_j, len_y):
            if (i, j) in upsampled:
                window.append((i, j))
                if new_start_j is None:
                    new_start_j = j
            elif new_start_j is not None:
                # Cells in a row are contiguous; past the run, stop scanning.
                break
        start_j = new_start_j

    return window
# In[6]:
def __prep_inputs(x, y, dist):
    """Coerce inputs to float arrays and resolve `dist` into a callable.

    `dist` may be None (absolute difference for 1-D, 1-norm otherwise),
    a positive number (p-norm order), or a callable used as-is.
    """
    x = np.asanyarray(x, dtype='float')
    y = np.asanyarray(y, dtype='float')

    if x.ndim == y.ndim > 1 and x.shape[1] != y.shape[1]:
        raise ValueError('second dimension of x and y must be the same')

    if isinstance(dist, numbers.Number):
        if dist <= 0:
            raise ValueError('dist cannot be a negative integer')
        dist = __norm(p=dist)
    elif dist is None:
        dist = __difference if x.ndim == 1 else __norm(p=1)

    return x, y, dist
# In[16]:
def __fastdtw(x, y, radius, dist):
    """Recursive FastDTW core: coarsen, solve, then refine inside a window."""
    # Below this length the coarsening recursion cannot shrink further,
    # so fall back to the exact DTW.
    if min(len(x), len(y)) < radius + 2:
        return dtw(x, y, dist=dist)

    half_x = __reduce_by_half(x)
    half_y = __reduce_by_half(y)
    _, coarse_path = __fastdtw(half_x, half_y, radius=radius, dist=dist)
    window = __expand_window(coarse_path, len(x), len(y), radius)
    return __dtw(x, y, window, dist=dist)
# In[4]:
def __norm(p):
    """Return a point-distance function computing the p-norm of (a - b)."""
    def pnorm(a, b):
        return np.linalg.norm(np.atleast_1d(a) - np.atleast_1d(b), p)
    return pnorm
# In[2]:
def __difference(a, b):
    """Absolute difference between two scalar samples."""
    return abs(b - a)
# In[3]:
def fastdtw(x, y, radius=1, dist=None):
    """Approximate dynamic-time-warping distance in O(N) time and memory.

    Parameters
    ----------
    x, y : array_like
        The two time series to align.
    radius : int
        Half-width of the refinement window around the coarse path. Larger
        values improve accuracy at the cost of time and memory; a radius as
        large as the inputs yields the exact DTW.
    dist : function or int or None
        Point distance: a callable dist(a, b), a p-norm order (number > 0),
        or None for abs(a - b) on 1-D inputs.

    Returns
    -------
    distance : float
        Approximate distance between the two series.
    path : list
        List of (i, j) index pairs into x and y.
    """
    x, y, dist = __prep_inputs(x, y, dist)
    return __fastdtw(x, y, radius, dist)
# In[7]:
def dtw(x, y, dist=None):
    """Exact dynamic-time-warping distance (no approximation).

    Parameters
    ----------
    x, y : array_like
        The two time series to align.
    dist : function or int or None
        Point distance: a callable dist(a, b), a p-norm order (number > 0),
        or None for abs(a - b) on 1-D inputs.

    Returns
    -------
    distance : float
        Distance between the two series over the full cost matrix.
    path : list
        List of (i, j) index pairs into x and y.
    """
    x, y, dist = __prep_inputs(x, y, dist)
    return __dtw(x, y, None, dist)
# In[17]:
def __dtw(x, y, window, dist):
    """Core DP: DTW over an optional search window; returns (cost, path)."""
    len_x, len_y = len(x), len(y)
    if window is None:
        # No restriction: evaluate the full len_x * len_y cost matrix.
        window = [(i, j) for i in range(len_x) for j in range(len_y)]
    # Shift to 1-based indices so (0, 0) can serve as the virtual start cell.
    window = ((i + 1, j + 1) for i, j in window)
    # D[i, j] = (cumulative cost, predecessor i, predecessor j); unvisited
    # cells default to +inf so they never win the min below.
    D = defaultdict(lambda: (float('inf'),))
    D[0, 0] = (0, 0, 0)
    for i, j in window:
        dt = dist(x[i-1], y[j-1])
        D[i, j] = min((D[i-1, j][0]+dt, i-1, j), (D[i, j-1][0]+dt, i, j-1),
                      (D[i-1, j-1][0]+dt, i-1, j-1), key=lambda a: a[0])
    # Backtrack from the end cell to recover the warping path.
    path = []
    i, j = len_x, len_y
    while not (i == j == 0):
        path.append((i-1, j-1))
        i, j = D[i, j][1], D[i, j][2]
    path.reverse()
    return (D[len_x, len_y][0], path)
| 5,152 | 25.425641 | 80 | py |
TSCP2 | TSCP2-main/src/utils/yahoo_ds_helper.py | from math import floor
import numpy as np
from pandas import read_csv
import pandas as pd
import csv
import os
def ts_samples(mbatch, win):
    """Split batch rows [label | history(win) | ... | future(win)] into
    (history, future, label) arrays."""
    labels = mbatch[:, 0]
    history = mbatch[:, 1:win + 1]
    future = mbatch[:, -win:]
    return history, future, labels
def load_yahoo_ds(path, window, mode='train'):
    """Window the Yahoo A4 benchmark; return (windows, labels) for the split.

    mode='all' returns everything; 'train' returns a shuffled 60% prefix;
    anything else returns the remaining 40% unshuffled.
    """
    save_path = os.path.join("../data/yahoo")
    windows, labels = extract_windows(path, window, 5, save_path, save=False)
    if mode == "all":
        return windows, labels

    train_size = floor(0.6 * windows.shape[0])
    if mode != "train":
        test_w = windows[train_size:,]
        test_l = labels[train_size:]
        print('test shape {} and number of change points {} '.format(test_w.shape, len(np.where(test_l > 0)[0])))
        return test_w, test_l

    perm = np.arange(train_size)
    np.random.shuffle(perm)
    train_w = windows[0:train_size, :][perm,]
    train_l = labels[0:train_size][perm]
    print('train samples : ', train_size)
    return train_w, train_l
def remove_anomalies(TS, anomalies):
    """Replace anomalous samples in-place by interpolating their neighbours.

    Each index flagged in `anomalies` (== 1) becomes the mean of its two
    neighbours, or a copy of its single neighbour at either end of the
    series. Returns the (mutated) input array.
    """
    index = np.where(anomalies == 1)[0]
    # print('anomaly :',index)
    n = TS.shape[0]
    if n < 2:
        # Nothing to interpolate from; previously a flagged single-sample
        # series crashed with an IndexError (TS[i + 1] out of bounds).
        return TS
    for i in index:
        if 0 < i < n - 1:
            TS[i] = (TS[i - 1] + TS[i + 1]) / 2
        elif i == n - 1:
            TS[i] = TS[i - 1]
        else:  # i == 0
            TS[i] = TS[i + 1]
    return TS
def extract_windows(path, window_size, step, save_path, save=False):
    """Slide windows over each Yahoo A4 benchmark file in `path`.

    Anomalous points are interpolated away, each series is min-max
    normalised, and lbl[i] is the offset of the first change point inside
    window i (0 when it has none). Optionally dumps windows/labels to CSV.
    """
    files = os.scandir(path)
    window_size  # NOTE(review): no-op expression statement; presumably leftover.
    windows = []
    lbl = []
    first = True
    num_cp = 0
    for f in files:
        if f.name != 'A4Benchmark_all.csv':
            data = pd.read_csv(f)
            cp = data['changepoint']
            ts = remove_anomalies(data['value'].values, data['anomaly'])
            # Min-max normalise the whole series to [0, 1].
            ts = (ts - np.min(ts)) / (np.max(ts) - np.min(ts))
            # ts = ts.values
            for i in range(0, ts.shape[0] - window_size, step):
                windows.append(np.array(ts[i:i + window_size]))
                # print("TS",ts[i:i+window_size])
                is_cp = np.where(cp[i:i + window_size] == 1)[0]
                if is_cp.size == 0:
                    is_cp = [0]
                else:
                    num_cp += 1
                lbl.append(is_cp[0])
                # print(is_cp)
            first = False
    print("number of samples : {} / number of samples with change point : {}".format(len(windows), num_cp))
    windows = np.array(windows)
    if save:
        outfile = open(os.path.join(save_path, 'norm_A4_all_window' + str(window_size) + ".csv"), 'w', newline='')
        writer = csv.writer(outfile)
        writer.writerows(windows)
        outfile.close()
        outfile = open(os.path.join(save_path, 'norm_A4_all_window_label' + str(window_size) + ".csv"), 'w', newline='')
        writer = csv.writer(outfile)
        writer.writerows(np.array(lbl))
        outfile.close()
    return windows, np.array(lbl)
TSCP2 | TSCP2-main/src/utils/analysis_helper.py | import csv
import os
import numpy as np
from numpy import mean
from numpy import std
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.metrics import mean_squared_error
from utils.FastDTW import fastdtw
def getMSE(Yt, y_pred, sq=False):
    """Per-window MSE between ground truth Yt (N, T, D) and flat predictions.

    Returns (loss, err_total): per-feature MSE rows and per-window means.
    """
    y_hat = y_pred.reshape((Yt.shape[0], Yt.shape[1], Yt.shape[2]))
    print('y_hat : ', y_hat.shape)
    loss = np.zeros((Yt.shape[0], Yt.shape[1]))
    err_total = np.zeros((Yt.shape[0]))
    for i in range(0, Yt.shape[0]):
        actual = Yt[i, :, :].T
        predicted = y_hat[i, :, :].T
        loss[i, :] = mean_squared_error(actual, predicted, multioutput='raw_values')
        err_total[i] = mean_squared_error(actual, predicted)
    return loss, err_total
def getPE(Yt, y_pred, sq=False):
    """Relative (percentage) error between Yt (N, T, D) and flat predictions.

    Returns (loss, err_total): per-window relative-error rows and their means.
    """
    y_hat = y_pred.reshape((Yt.shape[0], Yt.shape[1], Yt.shape[2]))
    print('y_hat : ', y_hat.shape)
    loss = np.zeros((Yt.shape[0], Yt.shape[1]))
    err_total = np.zeros((Yt.shape[0]))
    for i in range(0, Yt.shape[0]):
        actual = Yt[i, :, :].T
        predicted = y_hat[i, :, :].T
        relative = (actual - predicted) / predicted
        loss[i, :] = relative
        err_total[i] = np.mean(relative)
    return loss, err_total
def getMSPE(Yt, y_pred, sq=False):
    """Range-scaled absolute error between Yt (N, T, D) and flat predictions.

    Per window i, each row's mean |actual - predicted| is divided by the
    prediction's value range. Returns (loss, err_total).
    """
    y_hat = y_pred.reshape((Yt.shape[0], Yt.shape[1], Yt.shape[2]))
    loss = np.zeros((Yt.shape[0], Yt.shape[1]))
    err_total = np.zeros((Yt.shape[0]))
    for i in range(0, Yt.shape[0]):
        actual = Yt[i, :, :]
        predicted = y_hat[i, :, :]
        span = np.abs(np.max(predicted) - np.min(predicted))
        scaled = np.mean(np.abs(actual - predicted) / span, axis=1)
        loss[i, :] = scaled
        err_total[i] = np.mean(scaled)
    return loss, err_total
def remove_outliers(data, m=2):
    """Report per-column statistics and return `data` unchanged.

    NOTE(review): the actual clipping line is commented out in the original,
    so despite the name this is currently a stats report only — the outlier
    mask is computed but unused.
    """
    for col in range(0, data.shape[1]):
        column = data[:, col]
        outliers = abs(column - np.mean(column)) >= m * np.std(column)
        print('Min({}), Max({}), median :{}, mean : {} and std {} '.format(np.min(column), np.max(column), np.median(column), np.mean(column), np.std(column)))
        #data[outliers,i]= m * np.std(data[:,i])
        #print(outlier)
    return data
def plot_predict(y, p, model):
    """Plot real vs. predicted series with the MSE shown in the title."""
    plt.plot(y, label='real data')
    plt.plot(p, label='prediction')
    plt.legend()
    plt.title("{} (loss{})".format(model, mean_squared_error(y, p)))
def getDTW(Yt, y_pred):
    """FastDTW distance per (window, feature) between Yt (N, T, D) and the
    flat predictions; returns (per-pair distances, per-window means)."""
    y_hat = y_pred.reshape((Yt.shape[0], Yt.shape[1], Yt.shape[2]))
    print('y_hat : ', y_hat.shape)
    dtw = np.zeros((Yt.shape[0], Yt.shape[1]))
    for i in range(0, Yt.shape[0]):
        for j in range(0, Yt.shape[1]):
            dtw[i, j], _ = fastdtw(Yt[i, j, :], y_hat[i, j, :])
    dtw_total = np.mean(dtw, axis=1)
    print('dtw : {} , dta_total : {}'.format(dtw.shape,dtw_total.shape))
    return dtw, dtw_total
def dtw_boundaries(dtw_tot, order, cp_size, PATH, MODEL_NAME, option_str, mode):
    """Detect change-point boundaries as local maxima of the DTW-error
    gradient and write them, one index per row, to a CSV file.

    Args:
        dtw_tot: 1-D sequence of per-window DTW errors.
        order: neighbourhood size for scipy's argrelextrema peak search.
        cp_size: number of windows; cp_size - 1 is always appended as the
            final boundary.
        PATH, MODEL_NAME, option_str, mode: pieces of the output file name.
    """
    errors = np.gradient(np.array(dtw_tot)).tolist()
    predBoundaries = signal.argrelextrema(np.asarray(errors), np.greater, order=order)[0].tolist()
    predBoundaries.append(cp_size - 1)
    out_name = os.path.join(PATH, MODEL_NAME + option_str + "-" + mode + "-boundaries-order" + str(order) + ".csv")
    # Fix: the original leaked the file handle (csv.writer(open(...)) with no
    # close); a context manager guarantees the file is flushed and closed.
    with open(out_name, "w", newline='') as outfile:
        csv.writer(outfile).writerows(np.array([[x] for x in predBoundaries]))
def rep_visu(x_test,win,history, future, lbl):
    """Plot N=20 random samples: the raw history/future windows on the left
    column, their learned representations on the right."""
    N = 20
    id = np.random.randint(0, history.shape[0], N)
    plt.figure(figsize=(15, 40))
    for i in range(0, N):
        plt.subplot(N, 2, 1 + i * 2)
        plt.title('actual ts' + str(lbl[id[i]]))
        # Black = history half of the window, blue = future half.
        plt.plot(x_test[id[i], 0:win], color='k')
        plt.plot(x_test[id[i], win:], color='b')
        plt.legend(['history', 'future'])
        plt.subplot(N, 2, 2 + i * 2)
        plt.title('reps')
        plt.plot(history[id[i]], color='k')
        plt.plot(future[id[i]], color='b')
        # plt.legend(['history','future'])
    plt.show()
TSCP2 | TSCP2-main/src/utils/usc_ds_helper.py | from math import floor
import numpy as np
import pandas as pd
import scipy.io as sio
import csv
def ts_samples(mbatch, win):
    """Split batch rows [label | history(win) | ... | future(win)] into
    (history, future, label) arrays."""
    labels = mbatch[:, 0]
    history = mbatch[:, 1:win + 1]
    future = mbatch[:, -win:]
    return history, future, labels
def load_usc_ds(path, window, mode='train'):
    """Window the USC-HAD data; return (windows, labels) for the split.

    mode='all' returns everything; 'train' returns a shuffled 80% prefix;
    anything else returns the remaining 20% unshuffled.
    """
    windows, labels = extract_windows(path, window, mode)
    if mode == "all":
        return windows, labels

    split = int(floor(0.8 * windows.shape[0]))
    if mode == "train":
        perm = np.arange(split)
        np.random.shuffle(perm)
        train_w = windows[0:split][perm,]
        train_l = labels[0:split][perm]
        print('train samples : ', split)
        return train_w, train_l

    test_w = windows[split:]
    test_l = labels[split:]
    print('test shape {} and number of change points {} '.format(test_w.shape, len(np.where(test_l > 0)[0])))
    return test_w, test_l
def extract_windows(path, window_size, mode="train"):
    """Slide windows (stride 5) over the USC-HAD series in <path>/usc.mat.

    Returns (windows, lbl) where lbl[i] is the offset of the first change
    point inside window i (0 when the window has none).
    """
    #files = os.scandir(path)
    windows = []
    lbl = []
    dataset = sio.loadmat(path+"usc.mat")
    # 'Y' holds the signal, 'L' the per-sample change-point flags.
    ts = np.array(dataset['Y'])
    ts = ts[:,0]
    cp = np.array(dataset['L'])
    cp = cp[:,0]
    #cp = pd.read_csv(path+"usc_label.csv")
    num_cp = 0
    #ts = np.sqrt(np.power(x[:, 0], 2) + np.power(x[:, 1], 2) + np.power(x[:, 2], 2))
    for i in range(0, ts.shape[0] - window_size, 5):
        windows.append(np.array(ts[i:i + window_size]))
        # print("TS",ts[i:i+window_size])
        is_cp = np.where(cp[i:i + window_size] == 1)[0]
        if is_cp.size == 0:
            is_cp = [0]
        else:
            num_cp += 1
        lbl.append(is_cp[0])
        # print(is_cp)
    print("number of samples : {} / number of samples with change point : {}".format(len(windows), num_cp))
    windows = np.array(windows)
    return windows, np.array(lbl)
TSCP2 | TSCP2-main/src/utils/logger.py | import logging
import logging.config
import yaml
import os
def get_logger(mod_name, log_dir):
    """Create a logger named `mod_name` that writes into `log_dir`.

    Uses logger_config.yml next to this module when present, otherwise falls
    back to logging.basicConfig(INFO). Returns the configured logger.
    """
    # makedirs(exist_ok=True) instead of mkdir: tolerates a pre-existing
    # directory (race with another process) and creates nested paths.
    os.makedirs(log_dir, exist_ok=True)
    config_filepath = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'logger_config.yml')
    if os.path.exists(config_filepath):
        with open(config_filepath, 'r') as f:
            config = yaml.safe_load(f.read())
        # Point the file handler at this module's own log file.
        config["handlers"]["file"]["filename"] = os.path.join(log_dir, mod_name + '.log')
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(mod_name)
    logger.info("Started log {}".format(os.path.join(log_dir, mod_name)))
    return logger
TSCP2 | TSCP2-main/src/utils/hasc_helper.py | from math import floor
import numpy as np
from pandas import read_csv
import pandas as pd
import csv
import os
def ts_samples(mbatch, win):
    """Split batch rows [label | history(win) | ... | future(win)] into
    (history, future, label) arrays."""
    labels = mbatch[:, 0]
    history = mbatch[:, 1:win + 1]
    future = mbatch[:, -win:]
    return history, future, labels
def load_hasc_ds(path, window, mode='train'):
    """Window the HASC data; return (windows, labels) for the split.

    mode='all' returns everything; 'train' returns a shuffled 80% prefix;
    anything else returns the remaining 20% unshuffled.
    """
    windows, labels = extract_windows(path, window, 5)
    if mode == "all":
        return windows, labels

    split = floor(0.8 * windows.shape[0])
    if mode == "train":
        perm = np.arange(split)
        np.random.shuffle(perm)
        train_w = windows[0:split][perm,]
        train_l = labels[0:split][perm]
        print('train samples : ', split)
        return train_w, train_l

    test_w = windows[split:]
    test_l = labels[split:]
    print('test shape {} and number of change points {} '.format(test_w.shape, len(np.where(test_l > 0)[0])))
    return test_w, test_l
def extract_windows(path, window_size, step):
    """Slide windows over every HASC csv in `path`.

    Column 0 of each file holds the change-point flag, columns 1..3 the x/y/z
    acceleration; the series used is the magnitude sqrt(x^2 + y^2 + z^2).
    lbl[i] is the offset of the first change point in window i (0 when none).
    """
    files = os.scandir(path)
    window_size  # NOTE(review): no-op expression statement; presumably leftover.
    windows = []
    lbl = []
    first = True
    num_cp = 0
    for f in files:
        dataset = pd.read_csv(f).values
        x = dataset[:,1:]
        cp = dataset[:,0]
        ts = np.sqrt(np.power(x[:, 0], 2) + np.power(x[:, 1], 2) + np.power(x[:, 2], 2))
        for i in range(0, ts.shape[0] - window_size, step):
            windows.append(np.array(ts[i:i + window_size]))
            # print("TS",ts[i:i+window_size])
            is_cp = np.where(cp[i:i + window_size] == 1)[0]
            if is_cp.size == 0:
                is_cp = [0]
            else:
                num_cp += 1
            lbl.append(is_cp[0])
            # print(is_cp)
    print("number of samples : {} / number of samples with change point : {}".format(len(windows), num_cp))
    windows = np.array(windows)
    return windows, np.array(lbl)
TSCP2 | TSCP2-main/src/utils/estimate_CPD.py | import numpy as np
from sklearn.metrics import confusion_matrix,f1_score
from matplotlib import pyplot as plt
from losses import _cosine_simililarity_dim1
def estimate_CPs(sim, gt, name, train_name, metric='cosine', threshold=0.5):
    """Threshold per-window similarity into change-point predictions and score
    them against ground truth, both point-wise and segment-wise.

    A similarity below `threshold` is predicted as a change point. Returns a
    CSV-style result string with both sets of metrics. `name`, `train_name`
    and `metric` are currently unused (plotting/recompute code is disabled).
    """
    #if metric == "cosine":
    #    sim = _cosine_simililarity_dim1(h, f)
    est_cp = np.zeros(sim.shape[0])
    est_cp[np.where(sim < threshold)[0]] = 1
    # Point-wise confusion matrix / F1.
    tn, fp, fn, tp = confusion_matrix(gt, est_cp).ravel()
    f1 = f1_score(gt, est_cp)
    ## gt==1
    gt_id = np.where(gt == 1)[0]
    """
    plt.figure(figsize=(15, 7))
    plt.subplot(2, 1, 1)
    for i in gt_id:
        plt.axvline(x=i, ymin=0, ymax=1, color='k')
    plt.subplot(2, 1, 2)
    for i in np.where(est_cp == 1)[0]:
        plt.axvline(x=i, ymin=0, ymax=1, color='r')
    plt.savefig(name+".png")
    plt.savefig(name + ".pdf")
    """
    print("tn {}, fp {}, fn {}, tp {} ----- f1-score {}".format(tn, fp, fn, tp, f1))
    ## continuous series
    # Segment-wise scoring: each maximal run of gt == 1 counts as one
    # positive; it is a hit if any prediction falls inside it (those hits are
    # cleared from est_cp so they cannot also count as false positives).
    i = 1
    pos, seq_tp, seq_fn, seq_fp = 0, 0, 0, 0
    while i < gt.shape[0]:
        if gt[i] == 1:
            pos += 1
            j = i
            while gt[i] == 1:
                i += 1
            if np.sum(est_cp[j:i]) > 0:
                seq_tp += 1
                est_cp[j:i] = 0
            else:
                seq_fn += 1
        i += 1
    # Remaining 0->1 transitions in est_cp are spurious predicted segments.
    seq_fp = np.where(np.diff(est_cp) == 1)[0].shape[0]
    seq_f1 = (2 * seq_tp) / (2 * seq_tp + seq_fn + seq_fp)
    print("SEQ : Pos {}, fp {}, fn {}, tp {} ----- f1-score {}".format(pos, seq_fp, seq_fn, seq_tp, seq_f1))
    result = "tn, {}, fp, {}, fn, {}, tp, {}, f1-score, {}, Pos, {}, seqfp, {}, seqfn, {}, seqtp, {}, seqf1, {}\n".format(tn, fp, fn, tp, f1, pos, seq_fp, seq_fn, seq_tp, seq_f1)
    return result
TSCP2 | TSCP2-main/src/utils/__init__.py | 0 | 0 | 0 | py | |
DG-Font | DG-Font-main/main.py | import argparse
import warnings
from datetime import datetime
from glob import glob
from shutil import copyfile
from collections import OrderedDict
import torch.nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from models.generator import Generator as Generator
from models.discriminator import Discriminator as Discriminator
from models.guidingNet import GuidingNet
from models.inception import InceptionV3
from train.train import trainGAN
from validation.validation import validateUN
from tools.utils import *
from datasets.datasetgetter import get_dataset
from tools.ops import initialize_queue
from tensorboardX import SummaryWriter
# Configuration
parser = argparse.ArgumentParser(description='PyTorch GAN Training')
parser.add_argument('--data_path', type=str, default='../data',
help='Dataset directory. Please refer Dataset in README.md')
parser.add_argument('--workers', default=4, type=int, help='the number of workers of data loader')
parser.add_argument('--model_name', type=str, default='GAN',
help='Prefix of logs and results folders. '
'ex) --model_name=ABC generates ABC_20191230-131145 in logs and results')
parser.add_argument('--epochs', default=250, type=int, help='Total number of epochs to run. Not actual epoch.')
parser.add_argument('--iters', default=1000, type=int, help='Total number of iterations per epoch')
parser.add_argument('--batch_size', default=32, type=int,
help='Batch size for training')
parser.add_argument('--val_num', default=190, type=int,help='Number of test images for each style')
parser.add_argument('--val_batch', default=10, type=int,
help='Batch size for validation. '
'The result images are stored in the form of (val_batch, val_batch) grid.')
parser.add_argument('--log_step', default=100, type=int)
parser.add_argument('--sty_dim', default=128, type=int, help='The size of style vector')
parser.add_argument('--output_k', default=400, type=int, help='Total number of classes to use')
parser.add_argument('--img_size', default=80, type=int, help='Input image size')
parser.add_argument('--dims', default=2048, type=int, help='Inception dims for FID')
parser.add_argument('--load_model', default=None, type=str, metavar='PATH',
help='path to latest checkpoint (default: None)'
'ex) --load_model GAN_20190101_101010'
'It loads the latest .ckpt file specified in checkpoint.txt in GAN_20190101_101010')
parser.add_argument('--validation', dest='validation', action='store_true',
help='Call for valiation only mode')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--gpu', default='0', type=str,
help='GPU id to use.')
parser.add_argument('--ddp', dest='ddp', action='store_true', help='Call if using DDP')
parser.add_argument('--port', default='8993', type=str)
parser.add_argument('--iid_mode', default='iid+', type=str, choices=['iid', 'iid+'])
parser.add_argument('--w_gp', default=10.0, type=float, help='Coefficient of GP of D')
parser.add_argument('--w_rec', default=0.1, type=float, help='Coefficient of Rec. loss of G')
parser.add_argument('--w_adv', default=1.0, type=float, help='Coefficient of Adv. loss of G')
parser.add_argument('--w_vec', default=0.01, type=float, help='Coefficient of Style vector rec. loss of G')
parser.add_argument('--w_off', default=0.5, type=float, help='Coefficient of offset normalization. loss of G')
def main():
    """Parse CLI arguments, prepare log/result directories (snapshotting the
    source tree for reproducibility) and launch training — spawning one
    worker per GPU when multiprocessing-distributed, otherwise running the
    worker inline."""
    ####################
    # Default settings #
    ####################
    args = parser.parse_args()
    print("PYTORCH VERSION", torch.__version__)
    args.data_dir = args.data_path
    args.start_epoch = 0

    args.train_mode = 'GAN'
    # NOTE(review): den is always 1 (iters // iters); presumably a leftover
    # scaling knob for the schedule divisions below — confirm intent.
    den = args.iters//args.iters
    # unsup_start : train networks with supervised data only before unsup_start
    # separated : train IIC only until epoch = args.separated
    # ema_start : Apply EMA to Generator after args.ema_start
    args.unsup_start = 0
    args.separated = 0
    args.ema_start = 1
    args.fid_start = 1

    args.unsup_start = args.unsup_start // den
    args.separated = args.separated // den
    args.ema_start = args.ema_start // den
    args.fid_start = args.fid_start // den

    # Cuda Set-up
    if args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # More than one GPU id in --gpu enables multiprocessing-distributed mode.
    args.multiprocessing_distributed = False
    if len(args.gpu) > 1:
        args.multiprocessing_distributed = True
    print(args.multiprocessing_distributed)

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    print(args.distributed)

    ngpus_per_node = torch.cuda.device_count()
    args.ngpus_per_node = ngpus_per_node
    print("MULTIPROCESSING DISTRIBUTED : ", args.multiprocessing_distributed)

    # Logs / Results: new runs get a timestamped name, resumed runs reuse it.
    if args.load_model is None:
        args.model_name = '{}_{}'.format(args.model_name, datetime.now().strftime("%Y%m%d-%H%M%S"))
    else:
        args.model_name = args.load_model

    makedirs('./logs')
    makedirs('./results')

    args.log_dir = os.path.join('./logs', args.model_name)
    args.event_dir = os.path.join(args.log_dir, 'events')
    args.res_dir = os.path.join('./results', args.model_name)

    makedirs(args.log_dir)
    # Snapshot the source tree (minus VCS/log dirs) next to the logs.
    dirs_to_make = next(os.walk('./'))[1]
    not_dirs = ['.idea', '.git', 'logs', 'results', '.gitignore', '.nsmlignore', 'resrc']
    makedirs(os.path.join(args.log_dir, 'codes'))
    for to_make in dirs_to_make:
        if to_make in not_dirs:
            continue
        makedirs(os.path.join(args.log_dir, 'codes', to_make))
    makedirs(args.res_dir)

    if args.load_model is None:
        pyfiles = glob("./*.py")
        for py in pyfiles:
            copyfile(py, os.path.join(args.log_dir, 'codes') + "/" + py)

        for to_make in dirs_to_make:
            if to_make in not_dirs:
                continue
            tmp_files = glob(os.path.join('./', to_make, "*.py"))
            for py in tmp_files:
                copyfile(py, os.path.join(args.log_dir, 'codes', py[2:]))

    if args.multiprocessing_distributed:
        # One worker process per GPU; world_size becomes the total GPU count.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: set up (optionally distributed) state, build the
    networks, load data, and run the train/validation loop for args.epochs."""
    if len(args.gpu) == 1:
        args.gpu = 0
    else:
        args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.multiprocessing_distributed:
            # Global rank = node rank * GPUs per node + local GPU index.
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:'+args.port,
                                world_size=args.world_size, rank=args.rank)

    # # of GT-classes
    args.num_cls = args.output_k

    # Classes to use: all 400 font classes. (The original spelled out the
    # literal list 0..399; list(range(400)) is the identical value.)
    args.att_to_use = list(range(400))

    # IIC statistics
    args.epoch_acc = []
    args.epoch_avg_subhead_acc = []
    args.epoch_stats = []

    # Logging
    logger = SummaryWriter(args.event_dir)

    # build model - return dict
    networks, opts = build_model(args)

    # load model if args.load_model is specified
    load_model(args, networks, opts)
    cudnn.benchmark = True

    # get dataset and data loader
    train_dataset, val_dataset = get_dataset(args)
    train_loader, val_loader, train_sampler = get_loader(args, {'train': train_dataset, 'val': val_dataset})

    # map the functions to execute - un / sup / semi-
    trainFunc, validationFunc = map_exec_func(args)

    # print all the argument
    print_args(args)

    # All the test is done in the training - do not need to call
    if args.validation:
        validationFunc(val_loader, networks, 999, args, {'logger': logger})
        return

    # For saving the model: only rank-0 of each node writes the record file.
    if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
        record_txt = open(os.path.join(args.log_dir, "record.txt"), "a+")
        for arg in vars(args):
            record_txt.write('{:35}{:20}\n'.format(arg, str(getattr(args, arg))))
        record_txt.close()

    # Run
    #validationFunc(val_loader, networks, 0, args, {'logger': logger, 'queue': queue})
    for epoch in range(args.start_epoch, args.epochs):
        print("START EPOCH[{}]".format(epoch+1))
        # Checkpoint 25 times over the run.
        if (epoch + 1) % (args.epochs // 25) == 0:
            save_model(args, epoch, networks, opts)

        if args.distributed:
            train_sampler.set_epoch(epoch)

        # Initialise the EMA generator from the live generator at ema_start.
        if epoch == args.ema_start and 'GAN' in args.train_mode:
            if args.distributed:
                networks['G_EMA'].module.load_state_dict(networks['G'].module.state_dict())
            else:
                networks['G_EMA'].load_state_dict(networks['G'].state_dict())

        trainFunc(train_loader, networks, opts, epoch, args, {'logger': logger})

        validationFunc(val_loader, networks, epoch, args, {'logger': logger})
#################
# Sub functions #
#################
def print_args(args):
    """Print every attribute/value pair of the parsed argument namespace."""
    for name, value in vars(args).items():
        print('{:35}{:20}\n'.format(name, str(value)))
def build_model(args):
    """Construct all networks and their optimizers.

    Builds guiding net C (+ EMA copy), discriminator D and generator G
    (+ EMA copy), places them on the configured device(s) (DDP,
    single-GPU, or DataParallel), and creates one optimizer per trainable
    network.  Returns ``(networks, opts)`` dicts keyed by network name.
    """
    # Which networks to build; every branch below keys off this string.
    args.to_train = 'CDG'
    networks = {}
    opts = {}
    if 'C' in args.to_train:
        networks['C'] = GuidingNet(args.img_size, {'cont': args.sty_dim, 'disc': args.output_k})
        networks['C_EMA'] = GuidingNet(args.img_size, {'cont': args.sty_dim, 'disc': args.output_k})
    if 'D' in args.to_train:
        networks['D'] = Discriminator(args.img_size, num_domains=args.output_k)
    if 'G' in args.to_train:
        networks['G'] = Generator(args.img_size, args.sty_dim, use_sn=False)
        networks['G_EMA'] = Generator(args.img_size, args.sty_dim, use_sn=False)
    if args.distributed:
        if args.gpu is not None:
            print('Distributed to', args.gpu)
            torch.cuda.set_device(args.gpu)
            # Per-process budget: split batch size and workers across GPUs.
            args.batch_size = int(args.batch_size / args.ngpus_per_node)
            args.workers = int(args.workers / args.ngpus_per_node)
            for name, net in networks.items():
                # 'inceptionNet' (if ever present) is skipped and left where
                # it is -- presumably eval-only; confirm before relying on it.
                if name in ['inceptionNet']:
                    continue
                net_tmp = net.cuda(args.gpu)
                networks[name] = torch.nn.parallel.DistributedDataParallel(net_tmp, device_ids=[args.gpu], output_device=args.gpu)
        else:
            for name, net in networks.items():
                net_tmp = net.cuda()
                networks[name] = torch.nn.parallel.DistributedDataParallel(net_tmp)
    elif args.gpu is not None:
        # Single-GPU, single-process run.
        torch.cuda.set_device(args.gpu)
        for name, net in networks.items():
            networks[name] = net.cuda(args.gpu)
    else:
        # Fallback: classic single-process DataParallel over visible GPUs.
        for name, net in networks.items():
            networks[name] = torch.nn.DataParallel(net).cuda()
    if 'C' in args.to_train:
        opts['C'] = torch.optim.Adam(
            networks['C'].module.parameters() if args.distributed else networks['C'].parameters(),
            1e-4, weight_decay=0.001)
        # Start the EMA copy from the exact current weights of C.
        if args.distributed:
            networks['C_EMA'].module.load_state_dict(networks['C'].module.state_dict())
        else:
            networks['C_EMA'].load_state_dict(networks['C'].state_dict())
    if 'D' in args.to_train:
        opts['D'] = torch.optim.RMSprop(
            networks['D'].module.parameters() if args.distributed else networks['D'].parameters(),
            1e-4, weight_decay=0.0001)
    if 'G' in args.to_train:
        opts['G'] = torch.optim.RMSprop(
            networks['G'].module.parameters() if args.distributed else networks['G'].parameters(),
            1e-4, weight_decay=0.0001)
    return networks, opts
def load_model(args, networks, opts):
    """Resume training state from the newest checkpoint named in
    ``<log_dir>/checkpoint.txt``.

    Mutates ``args.start_epoch`` and the entries of ``networks`` / ``opts``
    in place.  No-op when ``args.load_model`` is None.

    Fixes vs. original: the checkpoint-list file handle was opened and
    never closed (leak); it is now managed with ``with``.
    """
    if args.load_model is not None:
        # The last line of checkpoint.txt names the most recent snapshot.
        with open(os.path.join(args.log_dir, "checkpoint.txt"), 'r') as check_load:
            to_restore = check_load.readlines()[-1].strip()
        load_file = os.path.join(args.log_dir, to_restore)
        if os.path.isfile(load_file):
            print("=> loading checkpoint '{}'".format(load_file))
            checkpoint = torch.load(load_file, map_location='cpu')
            args.start_epoch = checkpoint['epoch']
            if not args.multiprocessing_distributed:
                for name, net in networks.items():
                    state = checkpoint[name + '_state_dict']
                    first_key = next(iter(state.keys()))
                    if 'module' in first_key:
                        # Checkpoint was written from a DataParallel/DDP
                        # wrapper: strip the 'module.' prefix from every key.
                        state = {key[7:]: val for key, val in state.items()}
                    net.load_state_dict(state)
                    networks[name] = net
            for name, opt in opts.items():
                opt.load_state_dict(checkpoint[name.lower() + '_optimizer'])
                opts[name] = opt
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(load_file, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.log_dir))
def get_loader(args, dataset):
    """Build the training DataLoader and a dict bundling validation loaders.

    ``dataset`` is ``{'train': {'TRAIN': ..., 'FULL': ...}, 'val': ...}``.
    Returns ``(train_loader, val_loader_dict, train_sampler)``.
    """
    trainset = dataset['train']
    valset = dataset['val']
    print(len(valset))

    main_trainset = trainset['TRAIN']

    # A DistributedSampler shards the data across workers; shuffling then
    # happens inside the sampler instead of the loader.
    sampler = torch.utils.data.distributed.DistributedSampler(main_trainset) if args.distributed else None

    train_loader = torch.utils.data.DataLoader(main_trainset, batch_size=args.batch_size,
                                               shuffle=(sampler is None), num_workers=args.workers,
                                               pin_memory=True, sampler=sampler, drop_last=False)

    base_val_loader = torch.utils.data.DataLoader(valset, batch_size=args.val_batch, shuffle=True,
                                                  num_workers=0, pin_memory=True, drop_last=False)
    val_loader = {'VAL': base_val_loader, 'VALSET': valset, 'TRAINSET': trainset['FULL']}

    return train_loader, val_loader, sampler
def map_exec_func(args):
    """Select the training / validation routines for ``args.train_mode``.

    Returns:
        ``(trainFunc, validationFunc)`` callables.

    Raises:
        ValueError: for an unsupported ``train_mode``.  (The original fell
        through and crashed later with an opaque UnboundLocalError.)
    """
    if args.train_mode == 'GAN':
        trainFunc = trainGAN
        validationFunc = validateUN
    else:
        raise ValueError("Unsupported train_mode: {!r}".format(args.train_mode))
    return trainFunc, validationFunc
def save_model(args, epoch, networks, opts):
    """Snapshot every network (and each non-EMA network's optimizer) after
    ``epoch``, appending the checkpoint name to checkpoint.txt."""
    # With multiprocessing, only the designated main process writes.
    is_main_process = (not args.multiprocessing_distributed
                       or args.rank % args.ngpus_per_node == 0)
    if not is_main_process:
        return
    check_list = open(os.path.join(args.log_dir, "checkpoint.txt"), "a+")
    # if (epoch + 1) % (args.epochs//10) == 0:
    with torch.no_grad():
        save_dict = {'epoch': epoch + 1}
        for name, net in networks.items():
            save_dict[name + '_state_dict'] = net.state_dict()
            # EMA copies are not directly optimized, so no optimizer state.
            if name not in ('G_EMA', 'C_EMA'):
                save_dict[name.lower() + '_optimizer'] = opts[name].state_dict()
        print("SAVE CHECKPOINT[{}] DONE".format(epoch + 1))
        save_checkpoint(save_dict, check_list, args.log_dir, epoch + 1)
    check_list.close()
# Script entry point: run the full training / validation pipeline.
if __name__ == '__main__':
    main()
| 17,653 | 43.024938 | 1,912 | py |
DG-Font | DG-Font-main/font2img.py | from PIL import Image,ImageDraw,ImageFont
import matplotlib.pyplot as plt
import os
import numpy as np
import pathlib
import argparse
# Command-line configuration for rendering character images from .ttf fonts.
parser = argparse.ArgumentParser(description='Obtaining characters from .ttf')
parser.add_argument('--ttf_path', type=str, default='../ttf_folder',help='ttf directory')
parser.add_argument('--chara', type=str, default='../chara.txt',help='characters')
parser.add_argument('--save_path', type=str, default='../save_folder',help='images directory')
parser.add_argument('--img_size', type=int, help='The size of generated images')
parser.add_argument('--chara_size', type=int, help='The size of generated characters')
args = parser.parse_args()
# Read the whole character set as one string; every character is rendered.
file_object = open(args.chara,encoding='utf-8')
try:
    characters = file_object.read()
finally:
    file_object.close()
def draw_single_char(ch, font, canvas_size, x_offset, y_offset):
    """Render one character in black on a white square RGB canvas."""
    canvas = Image.new("RGB", (canvas_size, canvas_size), (255, 255, 255))
    ImageDraw.Draw(canvas).text((x_offset, y_offset), ch, (0, 0, 0), font=font)
    return canvas
def draw_example(ch, src_font, canvas_size, x_offset, y_offset):
    """Render ``ch`` with ``src_font`` and paste it onto a fresh white canvas."""
    rendered = draw_single_char(ch, src_font, canvas_size, x_offset, y_offset)
    out = Image.new("RGB", (canvas_size, canvas_size), (255, 255, 255))
    out.paste(rendered, (0, 0))
    return out
# Collect every .ttf font under ttf_path; each font becomes one style id.
data_dir = args.ttf_path
data_root = pathlib.Path(data_dir)
print(data_root)
all_image_paths = list(data_root.glob('*.ttf*'))
all_image_paths = [str(path) for path in all_image_paths]
print(len(all_image_paths))
for i in range (len(all_image_paths)):
    print(all_image_paths[i])
# NOTE(review): 'seq' is never used afterwards in this script -- dead code.
seq = list()
# Render every character of every font to save_path/id_<label>/<idx>.png.
# The glyph is centred by offsetting (img_size - chara_size) / 2 on both axes.
for (label,item) in zip(range(len(all_image_paths)),all_image_paths):
    src_font = ImageFont.truetype(item, size = args.chara_size)
    for (chara,cnt) in zip(characters, range(len(characters))):
        img = draw_example(chara, src_font, args.img_size, (args.img_size-args.chara_size)/2, (args.img_size-args.chara_size)/2)
        path_full = os.path.join(args.save_path, 'id_%d'%label)
        if not os.path.exists(path_full):
            os.mkdir(path_full)
        img.save(os.path.join(path_full, "%04d.png" % (cnt)))
| 2,167 | 37.714286 | 128 | py |
DG-Font | DG-Font-main/functions/modulated_deform_conv_func.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import torch
from torch import nn
from torch.autograd import Function
from torch.nn.modules.utils import _pair
from torch.autograd.function import once_differentiable
import DCN
class ModulatedDeformConvFunction(Function):
    """Autograd wrapper around the compiled ``DCN`` extension implementing
    modulated deformable convolution (DCNv2)."""
    @staticmethod
    def forward(ctx, input, offset, mask, weight, bias,
                stride, padding, dilation, groups, deformable_groups, im2col_step):
        # Stash hyper-parameters on ctx; backward re-passes them to DCN.
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.kernel_size = _pair(weight.shape[2:4])
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step
        output = DCN.modulated_deform_conv_forward(input, weight, bias,
                                                   offset, mask,
                                                   ctx.kernel_size[0], ctx.kernel_size[1],
                                                   ctx.stride[0], ctx.stride[1],
                                                   ctx.padding[0], ctx.padding[1],
                                                   ctx.dilation[0], ctx.dilation[1],
                                                   ctx.groups,
                                                   ctx.deformable_groups,
                                                   ctx.im2col_step)
        ctx.save_for_backward(input, offset, mask, weight, bias)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \
            DCN.modulated_deform_conv_backward(input, weight,
                                               bias,
                                               offset, mask,
                                               grad_output,
                                               ctx.kernel_size[0], ctx.kernel_size[1],
                                               ctx.stride[0], ctx.stride[1],
                                               ctx.padding[0], ctx.padding[1],
                                               ctx.dilation[0], ctx.dilation[1],
                                               ctx.groups,
                                               ctx.deformable_groups,
                                               ctx.im2col_step)
        # The hyper-parameter inputs (stride .. im2col_step) get no gradient.
        return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\
            None, None, None, None, None, None
| 2,484 | 42.596491 | 83 | py |
DG-Font | DG-Font-main/functions/__init__.py | from .modulated_deform_conv_func import ModulatedDeformConvFunction
| 68 | 33.5 | 67 | py |
DG-Font | DG-Font-main/tools/utils.py | import os
import torch
class Logger(object):
    """Minimal scalar logger: buffers the values of the current step and
    prints the buffered dict once a new step begins."""

    def __init__(self, log_dir):
        # Dict of values collected for the current step (None when empty).
        self.last = None

    def scalar_summary(self, tag, value, step):
        """Record ``value`` under ``tag``; flush the previous step via print."""
        stale = self.last is not None and self.last['step'] != step
        if stale:
            print(self.last)
            self.last = None
        if self.last is None:
            self.last = {'step': step, 'iter': step, 'epoch': 1}
        self.last[tag] = value

    def images_summary(self, tag, images, step, nrow=8):
        """Log a list of images."""
        # NOTE(review): relies on a ``self.viz`` attribute never set in this
        # class -- presumably a leftover visdom handle; confirm before use.
        self.viz.images(
            images,
            opts=dict(title='%s/%d' % (tag, step), caption='%s/%d' % (tag, step)),
            nrow=nrow
        )
def makedirs(path):
    """Create ``path`` (including parents) if it does not already exist.

    Uses ``exist_ok=True``: the original exists-then-create check was not
    atomic, so concurrent workers racing to create the same directory could
    crash with FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
def save_checkpoint(state, check_list, log_dir, epoch=0):
    """Serialize ``state`` to ``<log_dir>/model_<epoch>.ckpt`` and append the
    checkpoint file name to the open ``check_list`` handle."""
    ckpt_name = 'model_{}.ckpt'.format(epoch)
    torch.save(state, os.path.join(log_dir, ckpt_name))
    check_list.write(ckpt_name + '\n')
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Incorporate ``val`` observed ``n`` times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) prediction scores.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, each the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # .reshape instead of .view: the correct[:k] slice can be
            # non-contiguous for k > 1, and .view raises a RuntimeError on
            # it in recent PyTorch versions (same fix as the official
            # pytorch/examples ImageNet script).
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def add_logs(args, logger, tag, value, step):
    """Forward one scalar to the tensorboard-style ``logger``.

    ``args`` is accepted for call-site uniformity but not used.
    """
    logger.add_scalar(tag, value, step)
| 1,968 | 25.608108 | 88 | py |
DG-Font | DG-Font-main/tools/ops.py | from torch import autograd
import torch
import torch.distributed as dist
from torch.nn import functional as F
def compute_grad_gp(d_out, x_in, is_patch=False):
    """R1-style gradient penalty: sum of squared input-gradients of the
    discriminator output, averaged over the batch."""
    batch_size = x_in.size(0)
    aggregated = d_out.mean() if is_patch else d_out.sum()
    grad_dout = autograd.grad(
        outputs=aggregated, inputs=x_in,
        create_graph=True, retain_graph=True, only_inputs=True)[0]
    squared = grad_dout.pow(2)
    assert squared.size() == x_in.size()
    return squared.sum() / batch_size
def compute_grad_gp_wgan(D, x_real, x_fake, gpu):
    """WGAN-GP penalty: mean of (||grad D(x_interp)|| - 1)^2 over random
    interpolations between real and fake samples."""
    alpha = torch.rand(x_real.size(0), 1, 1, 1).cuda(gpu)
    x_interpolate = ((1 - alpha) * x_real + alpha * x_fake).detach()
    x_interpolate.requires_grad = True
    d_inter_logit = D(x_interpolate)
    grad = torch.autograd.grad(d_inter_logit, x_interpolate,
                               grad_outputs=torch.ones_like(d_inter_logit), create_graph=True)[0]
    grad_norm = grad.view(grad.size(0), -1).norm(p=2, dim=1)
    return ((grad_norm - 1) ** 2).mean()
def update_average(model_tgt, model_src, beta=0.999):
    """EMA update: ``tgt <- beta * tgt + (1 - beta) * src``, parameter-wise."""
    with torch.no_grad():
        src_params = dict(model_src.named_parameters())
        for name, tgt_param in model_tgt.named_parameters():
            src_param = src_params[name]
            assert src_param is not tgt_param
            tgt_param.copy_(beta * tgt_param + (1. - beta) * src_param)
def copy_norm_params(model_tgt, model_src):
    """Copy the non-parameter state-dict entries of ``model_src`` (buffers
    such as BatchNorm running statistics) into ``model_tgt``; learnable
    parameters are left untouched."""
    with torch.no_grad():
        src_state = model_src.state_dict()
        # Remove every learnable-parameter key; only buffers remain.
        for param_name, _ in model_tgt.named_parameters():
            del src_state[param_name]
        tgt_state = model_tgt.state_dict()
        tgt_state.update(src_state)
        model_tgt.load_state_dict(tgt_state)
def calc_iic_loss(x_out, x_tf_out, lamb=1.0, EPS=1e-10):
    """IIC (Invariant Information Clustering) mutual-information loss
    between two (batch, k) softmax outputs of the same samples under
    different transforms.  Inputs must already be softmax probabilities.
    """
    # has had softmax applied
    _, k = x_out.size()
    p_i_j = compute_joint(x_out, x_tf_out)
    assert (p_i_j.size() == (k, k))
    # Marginals, broadcast back to (k, k) for the elementwise log terms.
    p_i = p_i_j.sum(dim=1).view(k, 1).expand(k, k)
    p_j = p_i_j.sum(dim=0).view(1, k).expand(k,
                                             k)  # but should be same, symmetric
    # avoid NaN losses. Effect will get cancelled out by p_i_j tiny anyway
    # NOTE(review): indexing through ``.data`` clamps the values in place
    # without autograd tracking the clamp itself; preserved as-is because a
    # clamp-based rewrite would change gradient flow for clamped entries.
    p_i_j[(p_i_j < EPS).data] = EPS
    p_j[(p_j < EPS).data] = EPS
    p_i[(p_i < EPS).data] = EPS
    # Negative mutual information (lamb trades off the entropy terms).
    loss = - p_i_j * (torch.log(p_i_j) \
                      - lamb * torch.log(p_j) \
                      - lamb * torch.log(p_i))
    loss = loss.sum()
    return loss
def compute_joint(x_out, x_tf_out):
    """Symmetrised, normalised joint distribution of two softmax outputs.

    Returns a (k, k) matrix that sums to 1; gradients flow through both
    inputs.
    """
    bn, k = x_out.size()
    assert x_tf_out.size(0) == bn and x_tf_out.size(1) == k
    # Per-sample outer product (bn, k, 1) * (bn, 1, k), summed over batch.
    joint = (x_out.unsqueeze(2) * x_tf_out.unsqueeze(1)).sum(dim=0)
    joint = (joint + joint.t()) / 2.  # symmetrise
    return joint / joint.sum()        # normalise
def calc_recon_loss(predict, target):
    """L1 reconstruction loss (mean absolute error)."""
    return (predict - target).abs().mean()
def calc_contrastive_loss(args, query, key, queue, temp=0.07):
    """InfoNCE contrastive loss: ``key`` is the positive for each ``query``
    row, ``queue`` supplies the negatives."""
    N = query.shape[0]
    K = queue.shape[0]
    zeros = torch.zeros(N, dtype=torch.long).cuda(args.gpu)
    key = key.detach()

    # Positive logits (N, 1): per-sample dot product query . key.
    logit_pos = torch.bmm(query.view(N, 1, -1), key.view(N, -1, 1)).view(N, 1)
    # Negative logits (N, K): query against every queue entry.
    logit_neg = torch.mm(query.view(N, -1), queue.t().view(-1, K))

    # Column 0 is the positive, so the target label is always 0.
    logit = torch.cat([logit_pos, logit_neg], dim=1)
    return F.cross_entropy(logit / temp, zeros)
def calc_adv_loss(logit, mode):
    """Hinge adversarial loss.

    mode 'd_real' / 'd_fake': discriminator hinge terms;
    mode 'g': generator loss (negative mean logit).
    """
    assert mode in ['d_real', 'd_fake', 'g']
    if mode == 'd_real':
        return F.relu(1.0 - logit).mean()
    if mode == 'd_fake':
        return F.relu(1.0 + logit).mean()
    return -logit.mean()
def queue_data(data, k):
    """Append new key rows ``k`` to the tail of the memory queue."""
    return torch.cat((data, k), dim=0)
def dequeue_data(data, K=1024):
    """Keep only the newest ``K`` rows of the queue (FIFO eviction)."""
    return data[-K:] if len(data) > K else data
def initialize_queue(model_k, device, train_loader, feat_size=128):
    """Seed the negative-key queue with momentum-encoder features of one
    training batch (the loop breaks after the first iteration)."""
    queue = torch.zeros((0, feat_size), dtype=torch.float).to(device)
    for _, (data, _) in enumerate(train_loader):
        x_k = data[1].cuda(device)
        keys = model_k(x_k)['cont'].detach()
        queue = dequeue_data(queue_data(queue, keys), K=1024)
        break  # one batch is enough to initialise the queue
    return queue
def average_gradients(model):
    """All-reduce and average every parameter gradient across all
    distributed workers."""
    world_size = float(dist.get_world_size())
    for param in model.parameters():
        # Parameters unused in this step have no gradient to reduce.
        if param.grad is None:
            continue
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data /= world_size
| 4,639 | 28.367089 | 97 | py |
DG-Font | DG-Font-main/modules/modulated_deform_conv.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import math
from torch import nn
from torch.nn import init
from torch.nn.modules.utils import _pair
from functions.modulated_deform_conv_func import ModulatedDeformConvFunction
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution (DCNv2) layer; ``forward`` takes
    precomputed per-location ``offset`` and ``mask`` tensors."""
    def __init__(self, in_channels, out_channels,
                 kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True):
        super(ModulatedDeformConv, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups))
        if out_channels % groups != 0:
            raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.im2col_step = im2col_step
        self.use_bias = bias
        self.weight = nn.Parameter(torch.Tensor(
            out_channels, in_channels//groups, *self.kernel_size))
        self.bias = nn.Parameter(torch.Tensor(out_channels))
        self.reset_parameters()
        # NOTE(review): with bias=False the bias parameter is only frozen,
        # not zeroed -- it keeps its random init from reset_parameters().
        # Confirm this is intended before relying on bias=False.
        if not self.use_bias:
            self.bias.requires_grad = False
    def reset_parameters(self):
        n = self.in_channels  # NOTE(review): unused; kept as-is
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def forward(self, input, offset, mask):
        # offset carries (y, x) displacements -> 2 channels per sampling
        # location; mask carries 1 modulation scalar per location.
        assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
            offset.shape[1]
        assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
            mask.shape[1]
        return ModulatedDeformConvFunction.apply(input, offset, mask,
                                                 self.weight,
                                                 self.bias,
                                                 self.stride,
                                                 self.padding,
                                                 self.dilation,
                                                 self.groups,
                                                 self.deformable_groups,
                                                 self.im2col_step)
# Functional alias for the raw deformable-convolution autograd op.
_ModulatedDeformConv = ModulatedDeformConvFunction.apply
class ModulatedDeformConvPack(ModulatedDeformConv):
    """DCNv2 layer that predicts its own offsets and masks.

    A plain conv (``conv_offset_mask``) maps ``input_offset`` to
    3 * k_h * k_w channels per deformable group (2 offset channels + 1
    mask channel per sampling location); the deformable convolution is
    then applied to ``input_real``.  ``double=True`` doubles the input
    channels of the predictor so two feature maps can be concatenated.
    """
    def __init__(self, in_channels, out_channels,
                 kernel_size, stride, padding,
                 dilation=1, groups=1, deformable_groups=1, double=False, im2col_step=64, bias=True, lr_mult=0.1):
        super(ModulatedDeformConvPack, self).__init__(in_channels, out_channels,
                                                      kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias)
        out_channels = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1]
        if double == False:
            self.conv_offset_mask = nn.Conv2d(self.in_channels,
                                              out_channels,
                                              kernel_size=self.kernel_size,
                                              stride=self.stride,
                                              padding=self.padding,
                                              bias=True)
        else:
            self.conv_offset_mask = nn.Conv2d(self.in_channels*2,
                                              out_channels,
                                              kernel_size=self.kernel_size,
                                              stride=self.stride,
                                              padding=self.padding,
                                              bias=True)
        # Custom attribute -- presumably read by an external LR scheduler to
        # scale this sub-module's learning rate; confirm against the trainer.
        self.conv_offset_mask.lr_mult = lr_mult
        self.init_offset()
    def init_offset(self):
        # Zero init: the layer starts as a regular convolution (no offsets,
        # masks at sigmoid(0) = 0.5).
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()
    def forward(self, input_offset, input_real):
        # Predict offsets and modulation masks from input_offset, then run
        # the deformable convolution over input_real.
        out = self.conv_offset_mask(input_offset)
        o1, o2, mask = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        return ModulatedDeformConvFunction.apply(input_real, offset, mask,
                                                 self.weight,
                                                 self.bias,
                                                 self.stride,
                                                 self.padding,
                                                 self.dilation,
                                                 self.groups,
                                                 self.deformable_groups,
                                                 self.im2col_step), offset
| 5,186 | 44.902655 | 119 | py |
DG-Font | DG-Font-main/modules/__init__.py | from .modulated_deform_conv import ModulatedDeformConv, _ModulatedDeformConv, ModulatedDeformConvPack | 101 | 101 | 101 | py |
DG-Font | DG-Font-main/models/guidingNet.py | from torch import nn
import torch.nn.functional as F
try:
from models.blocks import Conv2dBlock, FRN
except:
from blocks import Conv2dBlock, FRN
# VGG layer configurations: integers are 3x3-conv output channels, 'M' is a
# 2x2 max-pool.  NOTE(review): the 'N' marker in 'vgg19cut' is not handled
# by make_layers below -- confirm before selecting that config.
cfg = {
    'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'vgg19cut': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'N'],
}
class GuidingNet(nn.Module):
    """Guiding encoder: a VGG-11 (batch-norm) trunk with two linear heads,
    a continuous style head ('cont') and a discrete cluster head ('disc')."""

    def __init__(self, img_size=64, output_k={'cont': 128, 'disc': 10}):
        super(GuidingNet, self).__init__()
        # network layers setting
        self.features = make_layers(cfg['vgg11'], True)
        self.disc = nn.Linear(512, output_k['disc'])
        self.cont = nn.Linear(512, output_k['cont'])
        self._initialize_weights()

    def _flat_features(self, x):
        """Run the conv trunk and global-average-pool to a (B, 512) tensor."""
        feat = self.features(x)
        feat = F.adaptive_avg_pool2d(feat, (1, 1))
        return feat.view(feat.size(0), -1)

    def forward(self, x, sty=False):
        """Return the style code alone (sty=True) or both heads as a dict."""
        flat = self._flat_features(x)
        cont = self.cont(flat)
        if sty:
            return cont
        return {'cont': cont, 'disc': self.disc(flat)}

    def _initialize_weights(self):
        """Kaiming init for convs, unit/zero for BN, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def moco(self, x):
        """Continuous style embedding only (MoCo branch)."""
        return self.cont(self._flat_features(x))

    def iic(self, x):
        """Discrete cluster logits only (IIC branch)."""
        return self.disc(self._flat_features(x))
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential``.

    Integers become 3x3 conv (+ optional BatchNorm) + ReLU; 'M' becomes a
    2x2 max-pool.  Input is assumed to have 3 channels.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.ReLU(inplace=False))
        in_channels = v
    return nn.Sequential(*layers)
# Quick smoke test: push a random batch through both heads and print shapes.
if __name__ == '__main__':
    import torch
    C = GuidingNet(64)
    x_in = torch.randn(4, 3, 64, 64)
    sty = C.moco(x_in)
    cls = C.iic(x_in)
    print(sty.shape, cls.shape)
| 2,977 | 31.725275 | 120 | py |
DG-Font | DG-Font-main/models/discriminator.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.nn.init as init
import math
try:
from models.blocks import FRN, ActFirstResBlk
except:
from blocks import FRN, ActFirstResBlk
class Discriminator(nn.Module):
    """Discriminator: (image x, domain y) -> (logit out)."""
    def __init__(self, image_size=256, num_domains=2, max_conv_dim=1024):
        super(Discriminator, self).__init__()
        # Start narrower for large images so depth stays within budget.
        dim_in = 64 if image_size < 256 else 32
        blocks = []
        blocks += [nn.Conv2d(3, dim_in, 3, 1, 1)]
        # Downsample until the spatial size reaches 4x4.
        repeat_num = int(np.log2(image_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            blocks += [ActFirstResBlk(dim_in, dim_in, downsample=False)]
            blocks += [ActFirstResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out
        blocks += [nn.LeakyReLU(0.2)]
        # 4x4 conv collapses the remaining spatial extent to 1x1.
        blocks += [nn.Conv2d(dim_out, dim_out, 4, 1, 0)]
        blocks += [nn.LeakyReLU(0.2)]
        # One output logit per domain; forward() selects column y.
        blocks += [nn.Conv2d(dim_out, num_domains, 1, 1, 0)]
        self.main = nn.Sequential(*blocks)
        self.apply(weights_init('kaiming'))
    def forward(self, x, y):
        """
        Inputs:
            - x: images of shape (batch, 3, image_size, image_size).
            - y: domain indices of shape (batch).
        Output:
            - out: logits of shape (batch).
        """
        out = self.main(x)
        feat = out
        out = out.view(out.size(0), -1)                          # (batch, num_domains)
        idx = torch.LongTensor(range(y.size(0))).to(y.device)
        out = out[idx, y]                                        # (batch)
        return out, feat
    def _initialize_weights(self, mode='fan_in'):
        # NOTE(review): not invoked anywhere in this class (__init__ uses
        # weights_init('kaiming') instead); possibly dead code.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode=mode, nonlinearity='relu')
                if m.bias is not None:
                    m.bias.data.zero_()
def weights_init(init_type='gaussian'):
    """Return an ``nn.Module.apply``-compatible initializer closure.

    The closure initializes the weight (and zeroes the bias) of every
    module whose class name starts with 'Conv' or 'Linear'.  Supported
    types: gaussian, xavier, kaiming, orthogonal, default (no-op).
    """
    def init_fun(m):
        cls_name = m.__class__.__name__
        targets_weight = (cls_name.find('Conv') == 0 or cls_name.find('Linear') == 0) \
            and hasattr(m, 'weight')
        if targets_weight:
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
    return init_fun
return init_fun
# Quick smoke test: random batch and random domain labels through D.
if __name__ == '__main__':
    D = Discriminator(64, 10)
    x_in = torch.randn(4, 3, 64, 64)
    y_in = torch.randint(0, 10, size=(4, ))
    out, feat = D(x_in, y_in)
    print(out.shape, feat.shape)
| 3,132 | 34.202247 | 87 | py |
DG-Font | DG-Font-main/models/inception.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""
    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3
    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,    # First max pooling features
        192: 1,   # Second max pooling features
        768: 2,   # Pre-aux classifier features
        2048: 3   # Final average pooling features
    }
    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3
        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)
        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))
        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))
        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))
        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))
        # Feature extraction is typically frozen (requires_grad=False).
        for param in self.parameters():
            param.requires_grad = requires_grad
    def forward(self, inp):
        """Get Inception feature maps
        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)
        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)
        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)
        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)
            # Skip the remaining blocks once every requested one is done.
            if idx == self.last_needed_block:
                break
        return outp
def fid_inception_v3():
    """Build pretrained Inception model for FID computation
    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.
    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    # 1008 classes and no aux head match the TensorFlow FID checkpoint.
    inception = models.inception_v3(num_classes=1008,
                                    aux_logits=False,
                                    pretrained=False)
    # Swap in the patched Inception blocks (pooling semantics differ).
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    # Downloads the ported TensorFlow FID weights on first use.
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation (TensorFlow-compatible
    average pooling)."""

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_2(self.branch5x5_1(x))

        branch3x3dbl = self.branch3x3dbl_3(
            self.branch3x3dbl_2(self.branch3x3dbl_1(x)))

        # Patch: TensorFlow's average pool excludes the zero padding from
        # its average, hence count_include_pad=False.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                              count_include_pad=False)
        branch_pool = self.branch_pool(pooled)

        return torch.cat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 1)
class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block patched for FID computation (TensorFlow-compatible
    average pooling)."""

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = x
        for layer in (self.branch7x7_1, self.branch7x7_2, self.branch7x7_3):
            branch7x7 = layer(branch7x7)

        branch7x7dbl = x
        for layer in (self.branch7x7dbl_1, self.branch7x7dbl_2,
                      self.branch7x7dbl_3, self.branch7x7dbl_4,
                      self.branch7x7dbl_5):
            branch7x7dbl = layer(branch7x7dbl)

        # Patch: TensorFlow's average pool excludes the zero padding from
        # its average, hence count_include_pad=False.
        branch_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                         count_include_pad=False))

        return torch.cat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 1)
class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation
    (TensorFlow-compatible average pooling)."""

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        stem3x3 = self.branch3x3_1(x)
        branch3x3 = torch.cat(
            [self.branch3x3_2a(stem3x3), self.branch3x3_2b(stem3x3)], 1)

        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        branch3x3dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)

        # Patch: TensorFlow's average pool excludes the zero padding from
        # its average, hence count_include_pad=False.
        branch_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                         count_include_pad=False))

        return torch.cat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 1)
class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation.

    Differs from torchvision's InceptionE by using *max* pooling in the
    pool branch, reproducing the reference FID Inception weights.
    """

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)

        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat(
            [self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)

        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)

        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).
        pooled = self.branch_pool(
            F.max_pool2d(x, kernel_size=3, stride=1, padding=1))

        return torch.cat([out_1x1, out_3x3, out_3x3dbl, pooled], 1)
| 11,623 | 36.376206 | 126 | py |
DG-Font | DG-Font-main/models/generator.py | from torch import nn
import torch
import torch.nn.functional as F
import torch.nn.init as init
import scipy.io as io
import math
import numpy as np
try:
from models.blocks import LinearBlock, Conv2dBlock, ResBlocks
except:
from blocks import LinearBlock, Conv2dBlock, ResBlocks
import sys
sys.path.append('..')
from modules import modulated_deform_conv
class Generator(nn.Module):
    """Font generator: content encoder + AdaIN-modulated decoder.

    A style code ``s_ref`` is mapped by an MLP to AdaIN parameters that
    modulate the decoder while it reconstructs an image from the content
    features of ``x_src``.
    """

    def __init__(self, img_size=80, sty_dim=64, n_res=2, use_sn=False):
        super(Generator, self).__init__()

        print("Init Generator")

        self.nf = 64          # base channel width
        self.nf_mlp = 256     # hidden width of the style MLP

        self.decoder_norm = 'adain'

        # Helpers that route the MLP output into the decoder's AdaIN layers.
        self.adaptive_param_getter = get_num_adain_params
        self.adaptive_param_assign = assign_adain_params

        print("GENERATOR NF : ", self.nf)

        s0 = 16        # NOTE(review): unused locally; kept for config parity
        n_downs = 2    # number of down/up-sampling stages
        nf_dec = 256   # decoder input channels

        self.cnt_encoder = ContentEncoder(self.nf, n_downs, n_res, 'in', 'relu', 'reflect')
        self.decoder = Decoder(nf_dec, sty_dim, n_downs, n_res, self.decoder_norm, self.decoder_norm, 'relu', 'reflect', use_sn=use_sn)
        # MLP emits exactly as many values as the decoder's AdaIN layers need.
        self.mlp = MLP(sty_dim, self.adaptive_param_getter(self.decoder), self.nf_mlp, 3, 'none', 'relu')

        self.apply(weights_init('kaiming'))

    def forward(self, x_src, s_ref):
        # Encode content, then decode under the reference style.  The
        # decoder returns (image, offset_sum) -- see Decoder.forward.
        c_src, skip1, skip2 = self.cnt_encoder(x_src)
        x_out = self.decode(c_src, s_ref, skip1, skip2)
        return x_out

    def decode(self, cnt, sty, skip1, skip2):
        # Convert the style code into AdaIN parameters and install them on
        # the decoder before running it.
        adapt_params = self.mlp(sty)
        self.adaptive_param_assign(adapt_params, self.decoder)
        out = self.decoder(cnt, skip1, skip2)
        return out

    def _initialize_weights(self, mode='fan_in'):
        # Kaiming init for every conv layer; biases are zeroed.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode=mode, nonlinearity='relu')
                if m.bias is not None:
                    m.bias.data.zero_()
class Decoder(nn.Module):
    """Decoder with two deformable skip connections.

    Upsamples the content features back to image resolution.  At the two
    upsampling stages the encoder skips ``skip2``/``skip1`` are aligned to
    the decoder features by modulated deformable convolutions and then
    concatenated in.  Normalization uses AdaIN, whose parameters are
    assigned externally (see ``Generator.decode``).
    """

    def __init__(self, nf_dec, sty_dim, n_downs, n_res, res_norm, dec_norm, act, pad, use_sn=False):
        super(Decoder, self).__init__()

        print("Init Decoder")

        nf = nf_dec
        self.model = nn.ModuleList()
        self.model.append(ResBlocks(n_res, nf, res_norm, act, pad, use_sn=use_sn))

        self.model.append(nn.Upsample(scale_factor=2))
        self.model.append(Conv2dBlock(nf, nf//2, 5, 1, 2, norm=dec_norm, act=act, pad_type=pad, use_sn=use_sn))
        nf //= 2

        self.model.append(nn.Upsample(scale_factor=2))
        # Input is 2*nf because the deformable skip is concatenated first.
        self.model.append(Conv2dBlock(2*nf, nf//2, 5, 1, 2, norm=dec_norm, act=act, pad_type=pad, use_sn=use_sn))
        nf //= 2

        self.model.append(Conv2dBlock(2*nf, 3, 7, 1, 3, norm='none', act='tanh', pad_type=pad, use_sn=use_sn))
        self.model = nn.Sequential(*self.model)

        # Deformable convs that warp the skip features onto the decoder grid.
        # NOTE(review): hard-coded .cuda() prevents CPU-only inference.
        self.dcn = modulated_deform_conv.ModulatedDeformConvPack(64, 64, kernel_size=(3, 3), stride=1, padding=1, groups=1, deformable_groups=1, double=True).cuda()
        self.dcn_2 = modulated_deform_conv.ModulatedDeformConvPack(128, 128, kernel_size=(3, 3), stride=1, padding=1, groups=1, deformable_groups=1, double=True).cuda()

    def forward(self, x, skip1, skip2):
        """Return ``(image, offset_sum)``: the decoded image and the mean
        absolute deformation offset (used as a regularizer upstream)."""
        output = x
        for i in range(len(self.model)):
            output = self.model[i](output)
            if i == 2:
                # After the first upsample+conv: fuse the deeper skip.
                deformable_concat = torch.cat((output, skip2), dim=1)
                concat_pre, offset2 = self.dcn_2(deformable_concat, skip2)
                output = torch.cat((concat_pre, output), dim=1)

            if i == 4:
                # After the second upsample+conv: fuse the shallow skip.
                deformable_concat = torch.cat((output, skip1), dim=1)
                concat_pre, offset1 = self.dcn(deformable_concat, skip1)
                output = torch.cat((concat_pre, output), dim=1)

        offset_sum1 = torch.mean(torch.abs(offset1))
        offset_sum2 = torch.mean(torch.abs(offset2))
        offset_sum = (offset_sum1 + offset_sum2) / 2
        return output, offset_sum
class ContentEncoder(nn.Module):
    """Content encoder built on modulated deformable convolutions.

    Three deformable conv stages (stride 1, 2, 2), each followed by
    InstanceNorm + ReLU, then a stack of residual blocks.  Returns the
    final features plus the first two activations as skip connections.
    """

    def __init__(self, nf_cnt, n_downs, n_res, norm, act, pad, use_sn=False):
        super(ContentEncoder, self).__init__()

        print("Init ContentEncoder")

        nf = nf_cnt

        self.model = nn.ModuleList()
        self.model.append(ResBlocks(n_res, 256, norm=norm, act=act, pad_type=pad, use_sn=use_sn))
        self.model = nn.Sequential(*self.model)

        # NOTE(review): hard-coded .cuda() ties the module to GPU use.
        self.dcn1 = modulated_deform_conv.ModulatedDeformConvPack(3, 64, kernel_size=(7, 7), stride=1, padding=3, groups=1, deformable_groups=1).cuda()
        self.dcn2 = modulated_deform_conv.ModulatedDeformConvPack(64, 128, kernel_size=(4, 4), stride=2, padding=1, groups=1, deformable_groups=1).cuda()
        self.dcn3 = modulated_deform_conv.ModulatedDeformConvPack(128, 256, kernel_size=(4, 4), stride=2, padding=1, groups=1, deformable_groups=1).cuda()
        self.IN1 = nn.InstanceNorm2d(64)
        self.IN2 = nn.InstanceNorm2d(128)
        self.IN3 = nn.InstanceNorm2d(256)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x, _ = self.dcn1(x, x)
        x = self.IN1(x)
        x = self.activation(x)
        skip1 = x  # 64 channels, full resolution

        x, _ = self.dcn2(x, x)
        x = self.IN2(x)
        x = self.activation(x)
        skip2 = x  # 128 channels, half resolution (stride-2 conv)

        x, _ = self.dcn3(x, x)
        x = self.IN3(x)
        x = self.activation(x)
        x = self.model(x)

        return x, skip1, skip2
class MLP(nn.Module):
    """Multi-layer perceptron mapping a style code to AdaIN parameters.

    ``num_blocks`` linear layers in total: hidden layers use ``nf_mlp``
    units with the given norm/activation; the final layer is linear
    (no norm, no activation).
    """

    def __init__(self, nf_in, nf_out, nf_mlp, num_blocks, norm, act, use_sn=False):
        super(MLP, self).__init__()
        self.model = nn.ModuleList()
        nf = nf_mlp
        self.model.append(LinearBlock(nf_in, nf, norm=norm, act=act, use_sn=use_sn))
        for _ in range(num_blocks - 2):
            self.model.append(LinearBlock(nf, nf, norm=norm, act=act, use_sn=use_sn))
        self.model.append(LinearBlock(nf, nf_out, norm='none', act='none', use_sn=use_sn))
        self.model = nn.Sequential(*self.model)

    def forward(self, x):
        # Flatten everything but the batch dimension before the MLP.
        return self.model(x.view(x.size(0), -1))
def weights_init(init_type='gaussian'):
    """Return an initializer function usable with ``nn.Module.apply``.

    The returned function initializes the weights of every module whose
    class name starts with ``Conv`` or ``Linear`` according to
    ``init_type`` and zeroes their biases.

    Args:
        init_type: one of 'gaussian', 'xavier', 'kaiming', 'orthogonal'
            or 'default' (leave weights untouched).

    Raises:
        ValueError: (from the returned function) when ``init_type`` is
            not recognized and a matching module is encountered.
    """
    def init_fun(m):
        classname = m.__class__.__name__
        # startswith() replaces the original `find(...) == 0` idiom.
        if (classname.startswith('Conv') or classname.startswith('Linear')) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                # Was `assert 0`, which is stripped under `python -O` and
                # would silently leave weights uninitialized.
                raise ValueError("Unsupported initialization: {}".format(init_type))
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)

    return init_fun
def assign_adain_params(adain_params, model):
    """Distribute a flat batch of AdaIN parameters over ``model``.

    ``adain_params`` has shape (batch, total).  Each ``AdaIN2d`` layer in
    ``model`` consumes its first ``2 * num_features`` columns -- means
    first, then stds -- and the remaining columns are handed to the next
    AdaIN layer.
    """
    remaining = adain_params
    for module in model.modules():
        if module.__class__.__name__ != "AdaIN2d":
            continue
        n = module.num_features
        module.bias = remaining[:, :n].contiguous().view(-1)           # per-sample means
        module.weight = remaining[:, n:2 * n].contiguous().view(-1)    # per-sample stds
        if remaining.size(1) > 2 * n:
            remaining = remaining[:, 2 * n:]
def get_num_adain_params(model):
    """Return how many AdaIN parameters ``model`` consumes.

    Each ``AdaIN2d`` layer needs one mean and one std per feature, i.e.
    ``2 * num_features`` values.
    """
    return sum(2 * m.num_features
               for m in model.modules()
               if m.__class__.__name__ == "AdaIN2d")
| 7,688 | 38.229592 | 168 | py |
DG-Font | DG-Font-main/models/blocks.py | import torch
import torch.nn.functional as F
from torch import nn
class ResBlocks(nn.Module):
    """A sequential stack of ``num_blocks`` identical residual blocks."""

    def __init__(self, num_blocks, dim, norm, act, pad_type, use_sn=False):
        super(ResBlocks, self).__init__()
        blocks = [ResBlock(dim, norm=norm, act=act, pad_type=pad_type, use_sn=use_sn)
                  for _ in range(num_blocks)]
        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
class ResBlock(nn.Module):
    """Residual block whose residual branch is scaled by 0.1.

    Two 3x3 convolutions (the second without activation); the output is
    ``x + 0.1 * residual``.
    """

    def __init__(self, dim, norm='in', act='relu', pad_type='zero', use_sn=False):
        super(ResBlock, self).__init__()
        conv_a = Conv2dBlock(dim, dim, 3, 1, 1, norm=norm, act=act,
                             pad_type=pad_type, use_sn=use_sn)
        conv_b = Conv2dBlock(dim, dim, 3, 1, 1, norm=norm, act='none',
                             pad_type=pad_type, use_sn=use_sn)
        self.model = nn.Sequential(conv_a, conv_b)

    def forward(self, x):
        return x + 0.1 * self.model(x)
class ActFirstResBlk(nn.Module):
    """Pre-activation residual block with optional downsampling.

    FRN normalization is applied before each conv.  The shortcut uses a
    1x1 conv when the channel count changes; both paths are scaled by
    1/sqrt(2) before being summed.
    """

    def __init__(self, dim_in, dim_out, downsample=True):
        super(ActFirstResBlk, self).__init__()
        self.norm1 = FRN(dim_in)
        self.norm2 = FRN(dim_in)
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.downsample = downsample
        self.learned_sc = dim_in != dim_out
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        out = self.conv1x1(x) if self.learned_sc else x
        return F.avg_pool2d(out, 2) if self.downsample else out

    def _residual(self, x):
        out = self.conv1(self.norm1(x))
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        return self.conv2(self.norm2(out))

    def forward(self, x):
        scale = torch.rsqrt(torch.tensor(2.0))
        return scale * self._shortcut(x) + scale * self._residual(x)
class LinearBlock(nn.Module):
    """Linear layer with optional normalization, activation and spectral norm.

    Order of operations: fc -> norm -> activation.

    Args:
        in_dim / out_dim: input and output feature sizes.
        norm: 'bn', 'in' or 'none'.
        act: 'relu', 'lrelu', 'tanh' or 'none'.
        use_sn: wrap the linear layer in spectral normalization.

    Raises:
        ValueError: for an unsupported ``norm`` or ``act`` value.
    """

    def __init__(self, in_dim, out_dim, norm='none', act='relu', use_sn=False):
        super(LinearBlock, self).__init__()
        use_bias = True
        self.fc = nn.Linear(in_dim, out_dim, bias=use_bias)
        if use_sn:
            self.fc = nn.utils.spectral_norm(self.fc)

        # initialize normalization (applied over the out_dim features)
        norm_dim = out_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            # Was `assert 0`: silently skipped under `python -O`.
            raise ValueError("Unsupported normalization: {}".format(norm))

        # initialize activation
        if act == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif act == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif act == 'tanh':
            self.activation = nn.Tanh()
        elif act == 'none':
            self.activation = None
        else:
            raise ValueError("Unsupported activation: {}".format(act))

    def forward(self, x):
        out = self.fc(x)
        if self.norm:
            out = self.norm(out)
        if self.activation:
            out = self.activation(out)
        return out
class Conv2dBlock(nn.Module):
    """Conv layer with configurable padding, normalization and activation.

    Order of operations: pad -> conv -> norm -> activation.

    Args:
        in_dim / out_dim: channel counts.
        ks / st / padding: kernel size, stride and padding width.
        norm: 'bn', 'in', 'adain' or 'none'.
        act: 'relu', 'lrelu', 'tanh' or 'none'.
        pad_type: 'reflect', 'replicate' or 'zero'.
        use_bias: give the conv layer a bias term.
        use_sn: wrap the conv in spectral normalization.

    Raises:
        ValueError: for an unsupported ``pad_type``, ``norm`` or ``act``.
    """

    def __init__(self, in_dim, out_dim, ks, st, padding=0,
                 norm='none', act='relu', pad_type='zero',
                 use_bias=True, use_sn=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias

        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            # Was `assert 0`: silently skipped under `python -O`.
            raise ValueError("Unsupported padding type: {}".format(pad_type))

        # initialize normalization
        norm_dim = out_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'adain':
            self.norm = AdaIN2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            raise ValueError("Unsupported normalization: {}".format(norm))

        # initialize activation
        if act == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif act == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif act == 'tanh':
            self.activation = nn.Tanh()
        elif act == 'none':
            self.activation = None
        else:
            raise ValueError("Unsupported activation: {}".format(act))

        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
        if use_sn:
            self.conv = nn.utils.spectral_norm(self.conv)

    def forward(self, x):
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x
class FRN(nn.Module):
    """Filter Response Normalization with a learned threshold (TLU).

    Each channel is divided by the root of its mean squared activation
    over the spatial dimensions, affinely transformed by ``gamma`` and
    ``beta``, then thresholded at the learned ``tau``.
    """

    def __init__(self, num_features, eps=1e-6):
        super(FRN, self).__init__()
        shape = (1, num_features, 1, 1)
        self.tau = nn.Parameter(torch.zeros(*shape))
        self.gamma = nn.Parameter(torch.ones(*shape))
        self.beta = nn.Parameter(torch.zeros(*shape))
        self.eps = eps

    def forward(self, x):
        nu2 = torch.mean(x ** 2, dim=[2, 3], keepdim=True)
        normed = x * torch.rsqrt(nu2 + self.eps)
        return torch.max(self.gamma * normed + self.beta, self.tau)
class AdaIN2d(nn.Module):
    """Adaptive Instance Normalization.

    ``weight`` and ``bias`` are not learned here: they are injected from
    outside (one value per sample and feature, see
    ``assign_adain_params``) before ``forward`` is called.  The actual
    normalization reuses ``F.batch_norm`` by folding the batch dimension
    into the channel dimension.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False, track_running_stats=True):
        super(AdaIN2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if affine:
            self.weight = nn.Parameter(torch.Tensor(num_features))
            self.bias = nn.Parameter(torch.Tensor(num_features))
        else:
            # Filled in later by assign_adain_params().
            self.weight = None
            self.bias = None
        if track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)

    def forward(self, x):
        assert self.weight is not None and self.bias is not None, "AdaIN params are None"
        N, C, H, W = x.size()
        # One (mean, var) slot per sample*channel, since every sample has
        # its own affine parameters.
        mean_buf = self.running_mean.repeat(N)
        var_buf = self.running_var.repeat(N)
        flat = x.contiguous().view(1, N * C, H * W)
        normed = F.batch_norm(flat, mean_buf, var_buf,
                              self.weight, self.bias,
                              True, self.momentum, self.eps)
        return normed.view(N, C, H, W)

    def __repr__(self):
        return self.__class__.__name__ + '(num_features=' + str(self.num_features) + ')'
# Manual smoke test: running this module directly just prints a marker.
if __name__ == '__main__':
    print("CALL blocks.py")
| 7,518 | 33.64977 | 118 | py |
DG-Font | DG-Font-main/datasets/custom_dataset.py | import torch.utils.data as data
from PIL import Image
import os
import os.path
import sys
def has_file_allowed_extension(filename, extensions):
    """Checks if a file is an allowed extension.

    The comparison is case-insensitive; entries in ``extensions`` are
    expected to already be lowercase.

    Args:
        filename (string): path to a file
        extensions (iterable of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    lowered = filename.lower()
    # str.endswith accepts a tuple of suffixes, replacing the any() loop.
    return lowered.endswith(tuple(extensions))
def is_image_file(filename):
    """Checks if a file is an allowed image extension.

    Args:
        filename (string): path to a file

    Returns:
        bool: True if the filename ends with a known image extension
    """
    # Delegates to has_file_allowed_extension with the module-level
    # IMG_EXTENSIONS whitelist.
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(dir, class_to_idx, extensions):
    """Collect (path, class_index) pairs under ``dir``.

    Walks every class subdirectory (sorted, for deterministic ordering)
    and returns one tuple per file whose extension is in ``extensions``.
    """
    samples = []
    expanded = os.path.expanduser(dir)
    for target in sorted(class_to_idx.keys()):
        class_dir = os.path.join(expanded, target)
        if not os.path.isdir(class_dir):
            continue

        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if not has_file_allowed_extension(fname, extensions):
                    continue
                samples.append((os.path.join(root, fname), class_to_idx[target]))

    return samples
class DatasetFolder(data.Dataset):
    """A generic data loader where the samples are arranged in this way: ::

        root/class_x/xxx.ext
        root/class_x/xxy.ext
        root/class_y/123.ext
        root/class_y/nsdf3.ext

    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (list[string]): A list of allowed extensions.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.

    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(self, root, loader, extensions, transform=None, target_transform=None):
        classes, class_to_idx = self._find_classes(root)
        samples = make_dataset(root, class_to_idx, extensions)
        if len(samples) == 0:
            raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"
                               "Supported extensions are: " + ",".join(extensions)))

        self.root = root
        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]  # class index per sample

        self.transform = transform
        self.target_transform = target_transform

    def _find_classes(self, dir):
        """
        Finds the class folders in a dataset.

        Args:
            dir (string): Root directory path.

        Returns:
            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.

        Ensures:
            No class is a subdirectory of another.
        """
        if sys.version_info >= (3, 5):
            # Faster and available in Python 3.5 and above
            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        else:
            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
        classes.sort()
        # NOTE(review): assumes every class folder name carries an integer
        # after its first three characters (e.g. 'id_0', 'id_12') so the
        # classes sort numerically -- confirm against the dataset layout.
        classes.sort(key=lambda x: int(x[3:]))
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # NOTE(review): splitting on '/' assumes POSIX-style paths; the
        # separator differs on Windows (os.sep).
        imgname = path.split('/')[-1].replace('.JPEG', '')
        return sample, target, imgname

    def __len__(self):
        return len(self.samples)

    def __repr__(self):
        # Human-readable summary used by print(dataset).
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += ' Root Location: {}\n'.format(self.root)
        tmp = ' Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = ' Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
# Lowercase suffixes recognized as images.  Fix: the original list had
# 'webp' without a leading dot, so any filename merely *ending* in
# "webp" (not just ".webp" files) was accepted.
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp']
def pil_loader(path):
    """Load the image at ``path`` with PIL and return it as RGB."""
    # Open via an explicit file handle so the descriptor is closed
    # promptly and no ResourceWarning is raised
    # (https://github.com/python-pillow/Pillow/issues/835).
    with open(path, 'rb') as handle:
        return Image.open(handle).convert('RGB')
def accimage_loader(path):
    # Prefer the fast accimage backend; fall back to PIL when accimage
    # cannot decode the file.
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_loader(path):
    # Dispatch to whichever image backend torchvision is configured with.
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    else:
        return pil_loader(path)
class ImageFolerRemap(DatasetFolder):
    """ImageFolder variant that remaps folder labels through a table.

    ``remap_table`` maps the raw folder class index to the label actually
    returned; with ``with_idx`` the sample index is returned as well.
    (The class name keeps its original spelling for compatibility.)
    """

    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader, remap_table=None, with_idx=False):
        super(ImageFolerRemap, self).__init__(root, loader, IMG_EXTENSIONS,
                                              transform=transform,
                                              target_transform=target_transform)
        self.imgs = self.samples
        self.class_table = remap_table
        self.with_idx = with_idx

    def __getitem__(self, index):
        path, raw_target = self.samples[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            raw_target = self.target_transform(raw_target)
        target = self.class_table[raw_target]
        if self.with_idx:
            return image, index, target
        return image, target
class CrossdomainFolder(data.Dataset):
    """Dataset over a chosen subset of domain folders under ``root``.

    Only subdirectories whose names appear in ``data_to_use`` are
    indexed; each sample is a (transformed) image plus its domain index.

    Args:
        root (string): Root directory path.
        data_to_use: iterable of domain folder names to include.
        transform (callable, optional): transform applied to each sample.
        loader (callable): maps a file path to a loaded sample.
        extensions: iterable of allowed lowercase file extensions.
    """

    def __init__(self, root, data_to_use=('photo', 'monet'), transform=None, loader=default_loader, extensions=('.jpg',)):
        # Fixes: the original defaults were a shared mutable list and the
        # plain string 'jpg', which the extension check iterated
        # character-by-character (so '.png'/'.bmp' files matched too).
        self.data_to_use = data_to_use
        classes, class_to_idx = self._find_classes(root)
        samples = make_dataset(root, class_to_idx, extensions)
        if len(samples) == 0:
            raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"
                               "Supported extensions are: " + ",".join(extensions)))

        self.root = root
        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]  # domain index per sample

        self.transform = transform

    def _find_classes(self, dir):
        """
        Finds the class folders in a dataset.

        Args:
            dir (string): Root directory path.

        Returns:
            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.

        Ensures:
            No class is a subdirectory of another.
        """
        if sys.version_info >= (3, 5):
            # Faster and available in Python 3.5 and above
            classes = [d.name for d in os.scandir(dir) if d.is_dir() and d.name in self.data_to_use]
        else:
            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and d in self.data_to_use]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, target

    def __len__(self):
        return len(self.samples)

    def __repr__(self):
        # Human-readable summary used by print(dataset).
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += ' Root Location: {}\n'.format(self.root)
        tmp = ' Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
| 9,235 | 33.207407 | 131 | py |
DG-Font | DG-Font-main/datasets/datasetgetter.py | import torch
from torchvision.datasets import ImageFolder
import os
import torchvision.transforms as transforms
from datasets.custom_dataset import ImageFolerRemap, CrossdomainFolder
class Compose(object):
    """Apply a sequence of transforms in order, left to right."""

    def __init__(self, tf):
        self.tf = tf

    def __call__(self, img):
        out = img
        for transform in self.tf:
            out = transform(out)
        return out
def get_dataset(args):
    """Build the train/validation splits for the classes in ``args.att_to_use``.

    The last ``args.val_num`` samples of every class form the validation
    split; the rest are used for training.  The smallest and largest
    per-class training counts are recorded on ``args``.

    Returns:
        ({'TRAIN': train_subset, 'FULL': full_dataset}, val_subset)
    """
    mean = [0.5, 0.5, 0.5]
    std = [0.5, 0.5, 0.5]

    normalize = transforms.Normalize(mean=mean, std=std)

    transform = Compose([transforms.Resize((args.img_size, args.img_size)),
                         transforms.ToTensor(),
                         normalize])
    transform_val = Compose([transforms.Resize((args.img_size, args.img_size)),
                             transforms.ToTensor(),
                             normalize])

    class_to_use = args.att_to_use

    print('USE CLASSES', class_to_use)

    # Remap the raw folder labels onto a dense 0..N-1 range
    # (dict comprehension replaces the original manual counter).
    remap_table = {k: i for i, k in enumerate(class_to_use)}

    print("LABEL MAP:", remap_table)

    img_dir = args.data_dir

    dataset = ImageFolerRemap(img_dir, transform=transform, remap_table=remap_table)
    valdataset = ImageFolerRemap(img_dir, transform=transform_val, remap_table=remap_table)

    # Split each class: everything but the last `val_num` samples trains.
    tot_targets = torch.tensor(dataset.targets)

    min_data = 99999999
    max_data = 0

    train_idx = None
    val_idx = None
    for k in class_to_use:
        tmp_idx = (tot_targets == k).nonzero()
        train_tmp_idx = tmp_idx[:-args.val_num]
        val_tmp_idx = tmp_idx[-args.val_num:]
        if train_idx is None:
            # More robust than comparing k against class_to_use[0].
            train_idx = train_tmp_idx.clone()
            val_idx = val_tmp_idx.clone()
        else:
            train_idx = torch.cat((train_idx, train_tmp_idx))
            val_idx = torch.cat((val_idx, val_tmp_idx))
        min_data = min(min_data, len(train_tmp_idx))
        max_data = max(max_data, len(train_tmp_idx))

    train_dataset = torch.utils.data.Subset(dataset, train_idx)
    val_dataset = torch.utils.data.Subset(valdataset, val_idx)

    args.min_data = min_data
    args.max_data = max_data
    print("MINIMUM DATA :", args.min_data)
    print("MAXIMUM DATA :", args.max_data)

    train_dataset = {'TRAIN': train_dataset, 'FULL': dataset}

    return train_dataset, val_dataset
| 2,435 | 27.325581 | 91 | py |
DG-Font | DG-Font-main/train/train.py | from tqdm import trange
import torch.nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from tools.utils import *
from tools.ops import compute_grad_gp, update_average, copy_norm_params, queue_data, dequeue_data, \
average_gradients, calc_adv_loss, calc_contrastive_loss, calc_recon_loss
def trainGAN(data_loader, networks, opts, epoch, args, additional):
    """Run one epoch (``args.iters`` steps) of adversarial training.

    Each step alternates a discriminator update (adversarial loss plus a
    gradient penalty on real samples) and a generator/style-encoder
    update (adversarial + image/content reconstruction + deformation
    offset regularizer).  EMA copies of G and C are refreshed once
    ``epoch >= args.ema_start``.
    """
    # running-average loss meters for logging
    d_losses = AverageMeter()
    d_advs = AverageMeter()
    d_gps = AverageMeter()

    g_losses = AverageMeter()
    g_advs = AverageMeter()
    g_imgrecs = AverageMeter()
    g_rec = AverageMeter()

    moco_losses = AverageMeter()

    # set nets (unwrap the distributed wrappers where needed)
    D = networks['D']
    G = networks['G'] if not args.distributed else networks['G'].module
    C = networks['C'] if not args.distributed else networks['C'].module
    G_EMA = networks['G_EMA'] if not args.distributed else networks['G_EMA'].module
    C_EMA = networks['C_EMA'] if not args.distributed else networks['C_EMA'].module
    # set opts
    d_opt = opts['D']
    g_opt = opts['G']
    c_opt = opts['C']
    # switch to train mode
    D.train()
    G.train()
    C.train()
    C_EMA.train()
    G_EMA.train()

    logger = additional['logger']

    # iterate the loader cyclically for exactly args.iters steps
    train_it = iter(data_loader)

    t_train = trange(0, args.iters, initial=0, total=args.iters)

    for i in t_train:
        try:
            imgs, y_org = next(train_it)
        except:
            # loader exhausted mid-epoch: restart it
            train_it = iter(data_loader)
            imgs, y_org = next(train_it)

        x_org = imgs

        # pair each source image with a random reference from the batch
        x_ref_idx = torch.randperm(x_org.size(0))

        x_org = x_org.cuda(args.gpu)
        y_org = y_org.cuda(args.gpu)
        x_ref_idx = x_ref_idx.cuda(args.gpu)

        x_ref = x_org.clone()
        x_ref = x_ref[x_ref_idx]

        training_mode = 'GAN'

        ####################
        # BEGIN Train GANs #
        ####################
        # ---- D step: generate fakes without tracking generator grads ----
        with torch.no_grad():
            y_ref = y_org.clone()
            y_ref = y_ref[x_ref_idx]
            s_ref = C.moco(x_ref)                       # style code of the reference
            c_src, skip1, skip2 = G.cnt_encoder(x_org)  # content of the source
            x_fake, _ = G.decode(c_src, s_ref, skip1, skip2)

        x_ref.requires_grad_()  # needed for the gradient penalty below

        d_real_logit, _ = D(x_ref, y_ref)
        d_fake_logit, _ = D(x_fake.detach(), y_ref)

        d_adv_real = calc_adv_loss(d_real_logit, 'd_real')
        d_adv_fake = calc_adv_loss(d_fake_logit, 'd_fake')

        d_adv = d_adv_real + d_adv_fake

        # gradient penalty on the real samples
        d_gp = args.w_gp * compute_grad_gp(d_real_logit, x_ref, is_patch=False)

        d_loss = d_adv + d_gp

        d_opt.zero_grad()
        # retain_graph: d_real_logit's graph is reused by the penalty backward
        d_adv_real.backward(retain_graph=True)
        d_gp.backward()
        d_adv_fake.backward()
        if args.distributed:
            average_gradients(D)
        d_opt.step()

        # ---- G/C step ----
        s_src = C.moco(x_org)
        s_ref = C.moco(x_ref)

        c_src, skip1, skip2 = G.cnt_encoder(x_org)
        x_fake, offset_loss = G.decode(c_src, s_ref, skip1, skip2)
        x_rec, _ = G.decode(c_src, s_src, skip1, skip2)  # same-style reconstruction

        g_fake_logit, _ = D(x_fake, y_ref)
        g_rec_logit, _ = D(x_rec, y_org)

        g_adv_fake = calc_adv_loss(g_fake_logit, 'g')
        g_adv_rec = calc_adv_loss(g_rec_logit, 'g')

        g_adv = g_adv_fake + g_adv_rec

        g_imgrec = calc_recon_loss(x_rec, x_org)  # pixel reconstruction loss

        # content consistency: re-encoding the fake must match c_src
        c_x_fake, _, _ = G.cnt_encoder(x_fake)
        g_conrec = calc_recon_loss(c_x_fake, c_src)

        g_loss = args.w_adv * g_adv + args.w_rec * g_imgrec + args.w_rec * g_conrec + args.w_off * offset_loss

        g_opt.zero_grad()
        c_opt.zero_grad()
        g_loss.backward()
        if args.distributed:
            average_gradients(G)
            average_gradients(C)
        c_opt.step()
        g_opt.step()

        ##################
        # END Train GANs #
        ##################

        # refresh the exponential moving averages after the warm-up epochs
        if epoch >= args.ema_start:
            training_mode = training_mode + "_EMA"
            update_average(G_EMA, G)
            update_average(C_EMA, C)
        torch.cuda.synchronize()

        # logging (meters only after args.separated; prints on rank 0 only)
        with torch.no_grad():
            if epoch >= args.separated:
                d_losses.update(d_loss.item(), x_org.size(0))
                d_advs.update(d_adv.item(), x_org.size(0))
                d_gps.update(d_gp.item(), x_org.size(0))

                g_losses.update(g_loss.item(), x_org.size(0))
                g_advs.update(g_adv.item(), x_org.size(0))
                g_imgrecs.update(g_imgrec.item(), x_org.size(0))
                g_rec.update(g_conrec.item(), x_org.size(0))

                moco_losses.update(offset_loss.item(), x_org.size(0))

            if (i + 1) % args.log_step == 0 and (args.gpu == 0 or args.gpu == '0'):
                summary_step = epoch * args.iters + i
                add_logs(args, logger, 'D/LOSS', d_losses.avg, summary_step)
                add_logs(args, logger, 'D/ADV', d_advs.avg, summary_step)
                add_logs(args, logger, 'D/GP', d_gps.avg, summary_step)

                add_logs(args, logger, 'G/LOSS', g_losses.avg, summary_step)
                add_logs(args, logger, 'G/ADV', g_advs.avg, summary_step)
                add_logs(args, logger, 'G/IMGREC', g_imgrecs.avg, summary_step)
                add_logs(args, logger, 'G/conrec', g_rec.avg, summary_step)

                add_logs(args, logger, 'C/OFFSET', moco_losses.avg, summary_step)

                print('Epoch: [{}/{}] [{}/{}] MODE[{}] Avg Loss: D[{d_losses.avg:.2f}] G[{g_losses.avg:.2f}] '.format(epoch + 1, args.epochs, i+1, args.iters,
                                                                                                                      training_mode, d_losses=d_losses, g_losses=g_losses))

    # copy current (non-averaged) normalization statistics into the EMA nets
    copy_norm_params(G_EMA, G)
    copy_norm_params(C_EMA, C)
| 5,671 | 30.511111 | 158 | py |
DG-Font | DG-Font-main/validation/validation.py | import torch.nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.utils as vutils
import torch.nn.functional as F
import numpy as np
try:
from tqdm import tqdm
except ImportError:
# If not tqdm is not available, provide a mock version of it
def tqdm(x):
return x
from scipy import linalg
from tools.utils import *
def validateUN(data_loader, networks, epoch, args, additional=None):
    """Generate reference-guided translation grids for visual validation.

    Collects the last ``args.val_num`` samples of every class, then (once
    ``epoch >= args.fid_start``) translates a fixed batch and a randomly
    permuted batch of each source class to the style of each reference
    class using the EMA networks, saving the results as image grids in
    ``args.res_dir``.
    """
    # set nets (unwrap the distributed wrappers where needed)
    D = networks['D']
    G = networks['G'] if not args.distributed else networks['G'].module
    C = networks['C'] if not args.distributed else networks['C'].module
    C_EMA = networks['C_EMA'] if not args.distributed else networks['C_EMA'].module
    G_EMA = networks['G_EMA'] if not args.distributed else networks['G_EMA'].module
    # switch to eval mode
    D.eval()
    G.eval()
    C.eval()
    C_EMA.eval()
    G_EMA.eval()

    # data loader (val_loader is fetched but unused below)
    val_dataset = data_loader['TRAINSET']
    val_loader = data_loader['VAL']

    # gather, per class, the last `val_num` samples into one tensor
    x_each_cls = []
    with torch.no_grad():
        val_tot_tars = torch.tensor(val_dataset.targets)
        for cls_idx in range(len(args.att_to_use)):
            tmp_cls_set = (val_tot_tars == args.att_to_use[cls_idx]).nonzero()[-args.val_num:]
            tmp_ds = torch.utils.data.Subset(val_dataset, tmp_cls_set)
            tmp_dl = torch.utils.data.DataLoader(tmp_ds, batch_size=args.val_num, shuffle=False,
                                                 num_workers=0, pin_memory=True, drop_last=False)
            tmp_iter = iter(tmp_dl)
            tmp_sample = None
            for sample_idx in range(len(tmp_iter)):
                imgs, _ = next(tmp_iter)
                x_ = imgs
                if tmp_sample is None:
                    tmp_sample = x_.clone()
                else:
                    tmp_sample = torch.cat((tmp_sample, x_), 0)
            x_each_cls.append(tmp_sample)

    if epoch >= args.fid_start:
        # Reference guided
        with torch.no_grad():
            # Just a buffer image ( to make a grid )
            ones = torch.ones(1, x_each_cls[0].size(1), x_each_cls[0].size(2), x_each_cls[0].size(3)).cuda(args.gpu, non_blocking=True)
            for src_idx in range(len(args.att_to_use)):
                # fixed and randomly-permuted source batches
                x_src = x_each_cls[src_idx][:args.val_batch, :, :, :].cuda(args.gpu, non_blocking=True)
                rnd_idx = torch.randperm(x_each_cls[src_idx].size(0))[:args.val_batch]
                x_src_rnd = x_each_cls[src_idx][rnd_idx].cuda(args.gpu, non_blocking=True)
                for ref_idx in range(len(args.att_to_use)):
                    # first grid row: buffer image followed by the sources
                    x_res_ema = torch.cat((ones, x_src), 0)
                    x_rnd_ema = torch.cat((ones, x_src_rnd), 0)
                    x_ref = x_each_cls[ref_idx][:args.val_batch, :, :, :].cuda(args.gpu, non_blocking=True)
                    rnd_idx = torch.randperm(x_each_cls[ref_idx].size(0))[:args.val_batch]
                    x_ref_rnd = x_each_cls[ref_idx][rnd_idx].cuda(args.gpu, non_blocking=True)
                    for sample_idx in range(args.val_batch):
                        # translate the whole source batch to the style of
                        # one reference image, using the EMA networks
                        x_ref_tmp = x_ref[sample_idx: sample_idx + 1].repeat((args.val_batch, 1, 1, 1))

                        c_src, skip1, skip2 = G_EMA.cnt_encoder(x_src)
                        s_ref = C_EMA(x_ref_tmp, sty=True)
                        x_res_ema_tmp, _ = G_EMA.decode(c_src, s_ref, skip1, skip2)

                        # same, for the randomly permuted batch
                        x_ref_tmp = x_ref_rnd[sample_idx: sample_idx + 1].repeat((args.val_batch, 1, 1, 1))
                        c_src, skip1, skip2 = G_EMA.cnt_encoder(x_src_rnd)
                        s_ref = C_EMA(x_ref_tmp, sty=True)
                        x_rnd_ema_tmp, _ = G_EMA.decode(c_src, s_ref, skip1, skip2)

                        # prepend the reference image to its row of results
                        x_res_ema_tmp = torch.cat((x_ref[sample_idx: sample_idx + 1], x_res_ema_tmp), 0)
                        x_res_ema = torch.cat((x_res_ema, x_res_ema_tmp), 0)

                        x_rnd_ema_tmp = torch.cat((x_ref_rnd[sample_idx: sample_idx + 1], x_rnd_ema_tmp), 0)
                        x_rnd_ema = torch.cat((x_rnd_ema, x_rnd_ema_tmp), 0)

                    # one grid per (source class, reference class) pair
                    vutils.save_image(x_res_ema, os.path.join(args.res_dir, '{}_EMA_{}_{}{}.jpg'.format(args.gpu, epoch+1, src_idx, ref_idx)), normalize=True,
                                      nrow=(x_res_ema.size(0) // (x_src.size(0) + 2) + 1))
                    vutils.save_image(x_rnd_ema, os.path.join(args.res_dir, '{}_RNDEMA_{}_{}{}.jpg'.format(args.gpu, epoch+1, src_idx, ref_idx)), normalize=True,
                                      nrow=(x_res_ema.size(0) // (x_src.size(0) + 2) + 1))
| 4,643 | 46.387755 | 161 | py |
FPConv | FPConv-master/tools/test_scannet.py | import torch
from torch.utils.data import DataLoader
import numpy as np
import argparse
import importlib
import os
import sys
import json
from utils.switchnorm import convert_sn
from datasets.scannet_dataset_rgb_test import ScannetDatasetWholeScene_evaluation
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='6,7')
parser.add_argument("--batch_size", type=int, default=48)
parser.add_argument("--with_rgb", action='store_true', default=False)
parser.add_argument("--with_norm", action='store_true', default=False)
parser.add_argument("--use_sn", action='store_true', default=False)
parser.add_argument("--model", type=str, default='fpcnn_scannet_tiny_v3')
parser.add_argument("--weight_dir", type=str, default=None)
parser.add_argument("--save_dir", type=str, default=None)
parser.add_argument("--config", type=str, default='./config.json')
parser.add_argument("--skip_exist", type=bool, default=False)
parser.add_argument("--num_points", type=int, default=8192)
parser.add_argument("--mode", type=str, default='eval')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print(args)
# load config files
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
NUM_CLASSES = 21
NUM_POINTS = args.num_points # 8192 # 10240 + 1024
SEM_LABELS = None
class_dict = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) # 21 (0: unknown)
def load_checkpoint(model, filename):
    """Restore model weights from a saved checkpoint.

    Args:
        model: module whose ``load_state_dict`` receives the checkpoint's
            ``model_state`` entry.
        filename: path to a checkpoint containing ``epoch`` and
            ``model_state`` keys.

    Returns:
        The ``epoch`` value stored in the checkpoint.

    Raises:
        FileNotFoundError: if ``filename`` does not exist (the path is
            included in the exception message; the original only printed
            it and raised a bare error).
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    print("==> Loading from checkpoint %s" % filename)
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state'])
    print("==> Done")
    return epoch
def vote(predict, vote_num, pred, points_idx):
    """Accumulate per-block predictions into whole-scene voting buffers.

    A scene point may be sampled into several blocks; its class scores are
    summed and its vote count incremented so the caller can average later.

    :param predict: (pn, 21) float, accumulated class scores (updated in place)
    :param vote_num: (pn, 1) int, number of votes per point (updated in place)
    :param pred: (bs, np, 21) float, per-block class predictions
    :param points_idx: (bs, np) int, scene-point index of each block point
    :return: the updated (predict, vote_num) arrays

    Note: the original looped over every (batch, point) pair in Python and
    also shadowed the ``np`` numpy alias with ``bs, np = points_idx.shape``.
    ``np.add.at`` performs unbuffered in-place accumulation, so repeated
    indices are all counted — exactly equivalent, but vectorized.
    """
    num_classes = predict.shape[1]
    flat_idx = points_idx.reshape(-1)
    np.add.at(predict, flat_idx, pred.reshape(-1, num_classes))
    np.add.at(vote_num, (flat_idx, 0), 1)
    return predict, vote_num
def write_to_file(path, probs):
    """Persist per-point predictions for one scene.

    In ``test`` mode the probabilities are collapsed to hard labels
    (dropping the "unknown" column 0, mapping back through ``class_dict``)
    and written one label per line; otherwise the raw probability array
    is stored as a ``.npy`` file.

    :param path: output path without extension
    :param probs: (N, num_classes) array of class probabilities
    """
    is_test = args.mode == 'test'
    file_name = path + ('.txt' if is_test else '.npy')
    if args.skip_exist and os.path.isfile(file_name):
        print(' -- file exists, skip', file_name)
        return
    if not is_test:
        np.save(file_name, probs)
    else:
        # Arg-max over the known classes only, then map the 1-based index
        # back to the original ScanNet class id.
        labels = class_dict[np.argmax(probs[:, 1:], axis=1) + 1]
        with open(file_name, 'w') as f:
            f.write(str(labels[0]))
            for lbl in labels[1:]:
                f.write('\n{}'.format(lbl))
    print(' -- save file to ====>', file_name)
def test(model, dst_loader, pn_list, scene_list):
    '''
    Evaluate (or predict) whole scenes by voting over overlapping blocks.

    Relies on module-level globals: ``args`` (mode/save_dir), ``SEM_LABELS``
    (per-scene ground-truth labels, set in __main__), ``NUM_CLASSES``.

    :param model: network producing (B, N, C) logits; called under the
        caller's ``torch.no_grad()`` context
    :param dst_loader: loader yielding (points, seg, sample_weights, point_idx)
    :param pn_list: sn (list => int), the number of points in a scene
    :param scene_list: sn (list => str), scene id
    '''
    model.eval()
    total_seen = 0
    total_correct = 0
    total_seen_class = [0] * NUM_CLASSES
    total_correct_class = [0] * NUM_CLASSES
    total_iou_deno_class = [0] * NUM_CLASSES
    scene_num = len(scene_list)
    for scene_index in range(scene_num):
        print(' ======= {}/{} ======= '.format(scene_index, scene_num))
        # scene_index = 0
        scene_id = scene_list[scene_index]
        point_num = pn_list[scene_index]
        # Accumulators for the whole scene: summed class scores and the
        # number of votes each point received across overlapping blocks.
        # NOTE(review): np.int is deprecated in modern NumPy (>=1.20).
        predict = np.zeros((point_num, NUM_CLASSES), dtype=np.float32)  # pn,21
        vote_num = np.zeros((point_num, 1), dtype=np.int)  # pn,1
        for batch_data in dst_loader:
            pc, seg, smpw, pidx = batch_data
            pc = pc.cuda().float()
            pred = model(pc)  # B,N,C
            pred = torch.nn.functional.softmax(pred, dim=2)
            pred = pred.cpu().detach().numpy()
            pidx = pidx.numpy()  # B,N
            predict, vote_num = vote(predict, vote_num, pred, pidx)
        # Average the summed scores by the vote counts per point.
        predict = predict / vote_num
        if args.save_dir is not None:
            if not os.path.exists(args.save_dir):
                os.makedirs(args.save_dir)
            save_path = os.path.join(args.save_dir, '{}'.format(scene_id))
            write_to_file(save_path, predict)
        if args.mode != 'test':
            # Class 0 is "unknown": drop it from the arg-max and shift back.
            predict = np.argmax(predict[:, 1:], axis=1)  # pn
            predict += 1
            labels = SEM_LABELS[scene_index]
            # Only labelled points (label > 0) count towards the metrics.
            total_seen += np.sum(labels > 0)  # point_num
            total_correct += np.sum((predict == labels) & (labels > 0))
            print('accuracy: ', total_correct / total_seen)
            for l in range(NUM_CLASSES):
                total_seen_class[l] += np.sum((labels == l) & (labels > 0))
                total_correct_class[l] += np.sum((predict == l) & (labels == l))
                total_iou_deno_class[l] += np.sum(((predict == l) & (labels > 0)) | (labels == l))
    if args.mode != 'test':
        # IoU per class over all scenes; class 0 ("unknown") is excluded.
        IoU = np.array(total_correct_class[1:]) / (np.array(total_iou_deno_class[1:], dtype=np.float) + 1e-6)
        print('eval point avg class IoU: %f' % (np.mean(IoU)))
        IoU_Class = 'Each Class IoU:::\n'
        for i in range(IoU.shape[0]):
            print('Class %d : %.4f' % (i + 1, IoU[i]))
        print('eval accuracy: %f' % (total_correct / float(total_seen)))
        print('eval avg class acc: %f' % (np.mean(np.array(total_correct_class[1:]) / (np.array(total_seen_class[1:], dtype=np.float) + 1e-6))))
if __name__ == '__main__':
    # Feature width per point, excluding xyz: +3 for RGB, +3 for normals.
    input_channels = 0
    if args.with_rgb: input_channels += 3
    if args.with_norm: input_channels += 3
    # Initialize Model and Data Loader
    MODEL = importlib.import_module('models.' + args.model)
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=input_channels, num_pts=args.num_points)
    if args.use_sn:
        print(' --- use sn')
        # Presumably swaps normalization layers for SwitchNorm (see
        # utils.switchnorm); must happen before the weights are loaded.
        model = convert_sn(model)
    load_checkpoint(model, args.weight_dir)
    model.cuda()
    model = torch.nn.parallel.DataParallel(model)
    # Whole-scene evaluation dataset: yields overlapping blocks per scene.
    test_dst = ScannetDatasetWholeScene_evaluation(root=_cfg['scannet_pickle'],
                                                   scene_list_dir=_cfg['scene_list'],
                                                   split=args.mode,
                                                   block_points=NUM_POINTS,
                                                   with_rgb=args.with_rgb,
                                                   with_norm=args.with_norm)
    pn_list = test_dst.point_num
    scene_list = test_dst.scene_list
    # Module-level global consumed by test() when computing metrics.
    SEM_LABELS = test_dst.semantic_labels_list
    test_loader = DataLoader(test_dst, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=0)
    with torch.no_grad():
        test(model, test_loader, pn_list, scene_list)
| 7,024 | 36.367021 | 139 | py |
FPConv | FPConv-master/tools/test_s3dis.py | import os, sys
import json
import numpy as np
import argparse
import importlib
import torch
from torch.utils.data import DataLoader
from datasets.s3dis_dataset_test import S3DISWholeScene_evaluation
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='0')
parser.add_argument("--batch_size", type=int, default=12)
parser.add_argument("--model", type=str, default='fpcnn_s3dis')
parser.add_argument("--stride", type=float, default=0.5)
parser.add_argument("--block_size", type=float, default=2)
parser.add_argument("--test_area", type=int, default=5)
parser.add_argument("--num_pts", type=int, default=14564)
parser.add_argument("--weight_dir", type=str, default=None) # checkpoint path
parser.add_argument("--save_dir", type=str, default=None)
parser.add_argument("--config", type=str, default='./config.json')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print(args)
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
SEM_LABELS = None
NUM_CLASSES = 13
NUM_POINTS = args.num_pts
class_name_path = os.path.join('utils/s3dis_meta/class_names.txt')
g_classes = [x.rstrip() for x in open(class_name_path)]
class_dict = np.arange(13)
if args.save_dir is not None:
os.makedirs(args.save_dir, exist_ok=True)
def load_checkpoint(model, filename):
    """Restore model weights from a saved checkpoint.

    Args:
        model: module whose ``load_state_dict`` receives the checkpoint's
            ``model_state`` entry.
        filename: path to a checkpoint containing ``epoch`` and
            ``model_state`` keys.

    Returns:
        The ``epoch`` value stored in the checkpoint.

    Raises:
        FileNotFoundError: if ``filename`` does not exist. The path is now
            carried in the exception (the original raised a bare
            ``FileNotFoundError`` with no message).
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    print("==> Loading from checkpoint %s" % filename)
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state'])
    print("==> Done")
    return epoch
def vote(predict, pred, points_idx, vote_num_point):
    """Accumulate per-block predictions into the whole-scene score buffer.

    :param predict: (pn, 21) float, accumulated class scores (updated in place)
    :param pred: (bs, np, 21) float, per-block class predictions
    :param points_idx: (bs, np) int, scene-point index of each block point
    :param vote_num_point: (pn, 1) overlap counter — accepted for API
        compatibility but, as in the original, never modified here
    :return: the updated ``predict`` array

    The original iterated over every (batch, point) pair in Python (and
    shadowed the ``np`` alias with ``bs, np = points_idx.shape``).
    ``np.add.at`` does the same unbuffered accumulation — repeated indices
    are all summed — at C speed.
    """
    num_classes = predict.shape[1]
    np.add.at(predict, points_idx.reshape(-1), pred.reshape(-1, num_classes))
    return predict
def write_to_file(path, labels):
    '''
    Save the predicted per-point labels for one scene as a .npy file.

    :param path: path to save predicted label (np.save appends ``.npy``)
    :param labels: n (list => int), one predicted class id per point
    '''
    np.save(path, labels)
def test(model, dst_loader, pn_list, scene_list):
    '''
    Evaluate whole S3DIS scenes by summing votes over overlapping blocks.

    Relies on module-level globals: ``args``, ``SEM_LABELS`` (per-scene
    ground truth, set in __main__), ``NUM_CLASSES``, ``g_classes``.

    :param model: network producing (B, N, C) logits
    :param dst_loader: loader yielding (points, seg, point_idx)
    :param pn_list: sn (list => int), the number of points in a scene
    :param scene_list: sn (list => str), scene id
    '''
    model.eval()
    total_seen = 0
    total_correct = 0
    total_seen_class = [0] * NUM_CLASSES
    total_correct_class = [0] * NUM_CLASSES
    total_iou_deno_class = [0] * NUM_CLASSES
    scene_num = len(scene_list)
    for scene_index in range(scene_num):
        print(' ======= {}/{} ======= '.format(scene_index, scene_num))
        scene_id = scene_list[scene_index]
        point_num = pn_list[scene_index]
        # Summed class scores for every point of the scene.
        predict = np.zeros((point_num, NUM_CLASSES), dtype=np.float32)  # pn,21
        vote_num_point = np.zeros((point_num, 1), dtype=np.float32)
        for batch_data in dst_loader:  # tqdm(dst_loader):
            pc, seg, pidx = batch_data
            pc = pc.cuda().float()
            with torch.no_grad():
                pred = model(pc)  # B,N,C
                pred = torch.softmax(pred, dim=2)
            pred = pred.cpu().detach().numpy()
            seg = seg.data.numpy()
            pidx = pidx.numpy()  # B,N
            predict = vote(predict, pred, pidx, vote_num_point)
        # Summed (unnormalized) scores are enough for the arg-max.
        predict = np.argmax(predict, axis=1)
        # Save predictions
        if args.save_dir is not None:
            save_path = os.path.join(args.save_dir, scene_id)
            write_to_file(save_path, predict)
            print('Save predicted label to {}.'.format(save_path))
        labels = SEM_LABELS[scene_index]
        # S3DIS labels are 0-based; label >= 0 means a valid point.
        total_seen += np.sum(labels >= 0)  # point_num
        total_correct += np.sum((predict == labels) & (labels >= 0))
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum((labels == l) & (labels >= 0))
            total_correct_class[l] += np.sum((predict == l)
                                             & (labels == l))
            total_iou_deno_class[l] += np.sum(
                ((predict == l) & (labels >= 0)) | (labels == l))
        print('Batch eval accuracy: %f' %
              (total_correct / float(total_seen)))
    # Final metrics over all scenes; epsilon guards empty classes.
    IoU = np.array(
        total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float) + 1e-6)
    print('eval point avg class IoU: %f' % (np.mean(IoU)))
    for i in range(IoU.shape[0]):
        print('%s : %.4f' % (g_classes[i], IoU[i]))
    print('eval accuracy: %f' % (total_correct / float(total_seen)))
    print('eval avg class acc: %f' % (np.mean(np.array(
        total_correct_class) / (np.array(total_seen_class, dtype=np.float) + 1e-6))))
if __name__ == '__main__':
    # Initialize Model and Data Loader
    MODEL = importlib.import_module('models.' + args.model)
    # S3DIS points carry xyz + 6 extra channels (see with_rgb=True below).
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=6)
    load_checkpoint(model, args.weight_dir)
    model = torch.nn.parallel.DataParallel(model)
    model.cuda()
    # Whole-scene evaluation dataset: overlapping blocks swept with `stride`.
    test_dst = S3DISWholeScene_evaluation(root=_cfg['s3dis_data_root'],
                                          split='test',
                                          test_area=args.test_area,
                                          block_points=NUM_POINTS,
                                          block_size=args.block_size,
                                          stride=args.stride,
                                          with_rgb=True)
    pn_list = test_dst.point_num
    scene_list = test_dst.scene_list
    # Module-level global consumed by test() when computing metrics.
    SEM_LABELS = test_dst.semantic_labels_list
    test_loader = DataLoader(test_dst, batch_size=args.batch_size,
                             shuffle=False, pin_memory=True, num_workers=0)
    test(model, test_loader, pn_list, scene_list)
| 5,981 | 35.036145 | 87 | py |
FPConv | FPConv-master/tools/train_scannet.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.distributed as dist
import os, sys
import argparse
import importlib
import numpy as np
import json
import tensorboard_logger as tb_log
from datasets.scannet_dataset_rgb import ScannetDataset, ScannetDatasetWholeScene
from utils.saver import Saver
from utils.switchnorm import convert_sn
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='0,1')
parser.add_argument("--batch_size", type=int, default=12)
parser.add_argument("--epochs", type=int, default=300)
parser.add_argument('--workers', type=int, default=12)
parser.add_argument("--mode", type=str, default='train')
parser.add_argument("--model", type=str, default='fpcnn_scannet_tiny_v3')
parser.add_argument("--save_dir", type=str, default='logs/test_scannet_tiny')
parser.add_argument("--config", type=str, default='./config.json')
parser.add_argument("--use_sn", action='store_true', default=False)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lr_decay', type=float, default=0.1)
parser.add_argument('--lr_clip', type=float, default=0.000001)
parser.add_argument('--decay_step_list', type=list, default=[100, 200, 300])
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument("--resume", type=str, default=None)
parser.add_argument("--sample_rate", type=float, default=None)
parser.add_argument("--with_rgb", action='store_true', default=False)
parser.add_argument("--with_norm", action='store_true', default=False)
parser.add_argument("--num_points", type=int, default=8192)
parser.add_argument("--accum", type=int, default=24)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# load config file
with open(args.config, 'r') as f:
_cfg = json.load(f)
NUM_CLASSES = 21
NUM_POINTS = args.num_points
saver = Saver(args.save_dir, max_files=100)
print(args)
print(_cfg)
def log_str(info):
    # Thin logging hook: currently just prints, kept as a single place to
    # redirect training logs later (e.g. to a file).
    print(info)
def load_checkpoint(model, filename):
    """Restore model weights from a saved checkpoint.

    Args:
        model: module whose ``load_state_dict`` receives the checkpoint's
            ``model_state`` entry.
        filename: path to a checkpoint containing ``epoch`` and
            ``model_state`` keys.

    Returns:
        The ``epoch`` value stored in the checkpoint (used to resume).

    Raises:
        FileNotFoundError: if ``filename`` does not exist. The path is now
            carried in the exception (the original raised a bare
            ``FileNotFoundError``).
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    log_str("==> Loading from checkpoint %s" % filename)
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state'])
    log_str("==> Done")
    return epoch
class CrossEntropyLossWithWeights(torch.nn.Module):
    """Per-point cross entropy scaled by per-point sample weights.

    Uses the module-level ``NUM_CLASSES`` constant and moves targets and
    weights to the GPU inside ``forward``.
    """

    def __init__(self):
        super().__init__()
        # 'none' reduction keeps a per-point loss vector so it can be
        # multiplied by the weights before averaging.
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')

    def forward(self, predict, target, weights):
        """
        :param predict: (B,N,C) raw logits
        :param target: (B,N) integer class labels
        :param weights: (B,N) per-point sample weights
        :return: scalar mean of the weighted per-point losses
        """
        predict = predict.view(-1, NUM_CLASSES).contiguous()  # B*N, C
        target = target.view(-1).contiguous().cuda().long()  # B*N
        weights = weights.view(-1).contiguous().cuda().float()  # B*N
        loss = self.cross_entropy_loss(predict, target)  # B*N
        loss *= weights
        loss = torch.mean(loss)
        return loss
def train_one_epoch(model, dst_loader, optimizer, epoch, tb_log):
    """Train for one epoch with gradient accumulation.

    Gradients are accumulated over ``args.accum // args.batch_size``
    mini-batches before each optimizer step, emulating a larger batch.
    Uses module-level globals ``args`` and ``NUM_POINTS``.

    :param model: network producing (B, N, C) logits
    :param dst_loader: loader yielding (points, labels, sample_weights)
    :param optimizer: torch optimizer stepped after each accumulation window
    :param epoch: current epoch index (tensorboard step)
    :param tb_log: tensorboard_logger with ``log_value``
    """
    model.train()
    loss_func = CrossEntropyLossWithWeights()
    repeat = args.accum // args.batch_size
    log_str(' --- train, accumulate gradients for {} times. Total bacth size is {}.'.format(repeat, args.accum))
    loss_list = []
    loss_temp_list = []
    correct_temp = 0
    seen_temp = 0
    total_correct = 0
    total_seen = 0
    optimizer.zero_grad()
    # for it, batch in tqdm(enumerate(dst_loader)):
    for it, batch in enumerate(dst_loader):
        point_set, semantic_seg, sample_weight = batch
        point_set = point_set.cuda().float()
        predict = model(point_set)  # B,N,C
        loss = loss_func(predict, semantic_seg, sample_weight)
        # Scale the loss so the accumulated gradient matches a single
        # large-batch step.
        loss_norm = loss / repeat
        loss_norm.backward()
        # accumulate gradient: step only at window boundaries (or on the
        # final, possibly partial, batch of the epoch).
        if (it + 1) % repeat == 0 or (it + 1) == len(dst_loader):
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            optimizer.zero_grad()
        # 1. loss
        loss_list.append(loss.item())
        # 2. accuracy
        predict = torch.argmax(predict, dim=2).cpu().numpy()  # B,N
        semantic_seg = semantic_seg.numpy()
        correct = np.sum(predict == semantic_seg)
        batch_seen = predict.shape[0] * NUM_POINTS
        total_correct += correct
        total_seen += batch_seen
        # save temp data: rolling window stats printed every 100 batches
        loss_temp_list.append(loss.item())
        correct_temp += correct
        seen_temp += batch_seen
        if (it + 1) % 100 == 0:
            log_str(' -- batch: {}/{} -- '.format(it + 1, len(dst_loader)))
            log_str('accuracy: {:.4f}'.format(correct_temp / seen_temp))
            log_str('mean loss: {:.4f}'.format(np.mean(loss_temp_list)))
            loss_temp_list = []
            correct_temp = 0
            seen_temp = 0
    log_str(' -- epoch accuracy: {:.4f}'.format(total_correct / total_seen))
    log_str(' -- epoch mean loss: {:.4f}'.format(np.mean(loss_list)))
    if epoch % 5 == 0:
        tb_log.log_value('epoch oA', total_correct / total_seen, epoch)
        tb_log.log_value('epoch loss', np.mean(loss_list), epoch)
def eval_one_epoch(model, dst_loader, epoch, tb_log):
    """Run one evaluation pass and log loss / accuracy / IoU.

    Metrics ignore unlabelled points (label 0) and zero-weight points.
    Uses the module-level ``NUM_CLASSES`` constant.

    :param model: network producing (B, N, C) logits
    :param dst_loader: loader yielding (points, labels, sample_weights)
    :param epoch: current epoch index (tensorboard step)
    :param tb_log: tensorboard_logger with ``log_value``
    :return: mean IoU over the 20 known classes (class 0 excluded)
    """
    model.eval()
    total_correct = 0
    total_seen = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]
    loss_func = CrossEntropyLossWithWeights()
    loss_list = []
    with torch.no_grad():
        for it, batch in enumerate(dst_loader):
            batch_data, batch_label, batch_smpw = batch
            batch_data = batch_data.cuda().float()
            pred_val = model(batch_data)  # B,N,C
            loss = loss_func(pred_val, batch_label, batch_smpw)
            loss_list.append(loss.item())
            # convert to numpy array
            pred_val = torch.argmax(pred_val, dim=2).cpu().numpy()  # B,N
            batch_label = batch_label.numpy()
            batch_smpw = batch_smpw.numpy()
            # Only labelled (label > 0), positively-weighted points count.
            correct = np.sum((pred_val == batch_label) & (batch_label > 0) & (batch_smpw > 0))
            total_correct += correct
            total_seen += np.sum((batch_label > 0) & (batch_smpw > 0))
            for l in range(NUM_CLASSES):
                total_seen_class[l] += np.sum((batch_label == l) & (batch_smpw > 0))
                total_correct_class[l] += np.sum((pred_val == l) & (batch_label == l) & (batch_smpw > 0))
                total_iou_deno_class[l] += np.sum(((pred_val == l) | (batch_label == l)) & (batch_smpw > 0) & (batch_label > 0))
    # Per-class IoU skipping class 0 ("unknown"); epsilon guards division
    # by zero for classes never seen.  NOTE(review): np.float is deprecated
    # in modern NumPy (>=1.20).
    IoU = np.array(total_correct_class[1:]) / (np.array(total_iou_deno_class[1:], dtype=np.float) + 1e-6)
    log_str('eval point avg class IoU: %f' % (np.mean(IoU)))
    IoU_Class = 'Each Class IoU:::\n'
    for i in range(IoU.shape[0]):
        log_str('Class %d : %.4f' % (i + 1, IoU[i]))
    log_str('eval loss: %f' % (np.mean(loss_list)))
    log_str('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_str('eval avg class acc: %f' % (np.mean(np.array(total_correct_class[1:]) / (np.array(total_seen_class[1:], dtype=np.float) + 1e-6))))
    tb_log.log_value('Eval loss', np.mean(loss_list), epoch)
    tb_log.log_value('Eval mIoU', np.mean(IoU), epoch)
    tb_log.log_value('Eval oA', total_correct / float(total_seen), epoch)
    tb_log.log_value('Eval mA', np.mean(np.array(total_correct_class[1:]) / (np.array(total_seen_class[1:], dtype=np.float) + 1e-6)), epoch)
    return np.mean(IoU)
def train(model, train_loader, eval_loader, tb_log, resume_epoch=0):
    """Main training loop with step-decay LR schedule and periodic eval.

    When resuming, already-trained epochs are skipped; the resume epoch
    itself is evaluated once before training continues. Checkpoints are
    written (via the module-level ``saver``) whenever eval mIoU improves.

    :param resume_epoch: first epoch to actually train (0 = from scratch)
    """
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.98,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # init lr scheduler: multiply lr by args.lr_decay at each milestone in
    # args.decay_step_list, clipped at args.lr_clip.
    def lr_lbmd(cur_epoch):
        cur_decay = 1
        for decay_step in args.decay_step_list:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * args.lr_decay
        return max(cur_decay, args.lr_clip / args.lr)

    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lbmd)
    best_miou = 0
    best_epoch = 0
    for epoch in range(args.epochs):
        # resume training epoch: fast-forward past finished epochs, then
        # run one evaluation at the resume point before training resumes.
        if epoch < resume_epoch: continue
        elif resume_epoch > 0:
            log_str('====== resume epoch {} ======'.format(epoch))
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
            log_str(' === Best mIoU: {}, epoch {}. === '.format(best_miou, best_epoch))
            lr_scheduler.step(epoch)
            resume_epoch = 0
            continue
        # training
        log_str('====== epoch {} ======'.format(epoch))
        train_one_epoch(model, train_loader, optimizer, epoch, tb_log)
        lr_scheduler.step(epoch)
        # evaluate model: every 20 epochs, and every 5 epochs late in the run
        if (epoch > 0 and epoch % 20 == 0) or \
           (epoch > 220 and epoch % 5 == 0):
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
                saver.save_checkpoint(model, epoch, 'pn2_best_epoch_{}'.format(epoch))
            log_str(' === Best mIoU: {}, epoch {}. === '.format(best_miou, best_epoch))
if __name__ == '__main__':
    # Feature width per point, excluding xyz: +3 for RGB, +3 for normals.
    input_channels = 0
    if args.with_rgb: input_channels += 3
    if args.with_norm: input_channels += 3
    print('model input_channel: {}.'.format(input_channels))
    # model init
    MODEL = importlib.import_module('models.' + args.model)
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=input_channels, num_pts=args.num_points)
    if args.use_sn:
        print(' --- use sn')
        # BUG FIX: convert_sn is imported directly via
        # `from utils.switchnorm import convert_sn`, so the name `utils`
        # was never bound and the original `utils.convert_sn(model)`
        # raised NameError whenever --use_sn was passed.
        model = convert_sn(model)
    # resume: restore weights and the epoch to continue from
    from_epoch = 0
    if args.resume:
        from_epoch = load_checkpoint(model, args.resume)
    model = nn.parallel.DataParallel(model)
    model.cuda()
    # init tb_log
    tb_log.configure(os.path.join(args.save_dir, 'tensorboard'))
    # eval dataloader (whole-scene blocks, deterministic order)
    eval_dst = ScannetDatasetWholeScene(root=_cfg['scannet_pickle'],
                                        npoints=NUM_POINTS,
                                        split='eval',
                                        with_norm=args.with_norm,
                                        with_rgb=args.with_rgb)
    eval_loader = DataLoader(eval_dst, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=0)
    # train dataloader
    train_dst = ScannetDataset(root=_cfg['scannet_pickle'],
                               npoints=NUM_POINTS,
                               split='train' if args.mode == 'train' else 'eval',
                               with_dropout=True,
                               with_norm=args.with_norm,
                               with_rgb=args.with_rgb,
                               sample_rate=args.sample_rate)
    train_loader = DataLoader(train_dst,
                              batch_size=args.batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=args.workers,
                              drop_last=True)  # sync_bn will raise an unknown error with batch size of 1.
    train(model, train_loader, eval_loader, tb_log, from_epoch)
FPConv | FPConv-master/tools/train_s3dis.py | import os, sys
import argparse
import importlib
import numpy as np
import json
import time
import tensorboard_logger as tb_log
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets.s3dis_dataset import S3DIS
from utils.saver import Saver
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='0,1,2,3')
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--epochs", type=int, default=101)
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--num_classes', type=int, default=13)
parser.add_argument('--eval_freq', type=int, default=10)
parser.add_argument('--start_eval_epoch', type=int, default=0)
parser.add_argument("--accum_steps", type=int, default=8)
parser.add_argument('--sample_rate_eval', type=float, default=1)
parser.add_argument('--sample_rate_train', type=float, default=0.5)
parser.add_argument('--num_pts', type=int, default=14564)
parser.add_argument('--block_size', type=float, default=2)
parser.add_argument('--test_area', type=int, default=5)
parser.add_argument("--model", type=str, default='fpcnn_s3dis')
parser.add_argument("--save_dir", type=str, default='logs/test_s3dis/')
parser.add_argument("--config", type=str, default='./config.json')
parser.add_argument('--bn_momentum', type=float, default=0.02)
parser.add_argument('--warmup_epochs', type=int, default=8)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lr_decay', type=float, default=0.5)
parser.add_argument('--lr_clip', type=float, default=0.000001)
parser.add_argument('--decay_step_list', type=list, default=[25, 50, 75])
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument("--resume", type=str, default=None)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print(args)
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
NUM_CLASSES = args.num_classes
NUM_POINTS = args.num_pts
saver = Saver(args.save_dir)
class WarmStart:
    """Linear learning-rate warm-up over a fixed number of steps."""

    def __init__(self, optimizer, steps, lr):
        '''
        steps: number of warm-up steps; if it is 0, warm up is not activated
        lr: learning rate reached at the end of the warm-up
        '''
        self.optimizer = optimizer
        self.steps = steps
        self.iter = 0
        if steps != 0:
            self.increment = lr / steps
            # Start from zero so the very first updates are tiny.
            for group in self.optimizer.param_groups:
                group['lr'] = 0

    def step(self):
        """Advance one step; raise the lr linearly while still warming up."""
        self.iter += 1
        if self.iter < self.steps:
            warm_lr = self.iter * self.increment
            for group in self.optimizer.param_groups:
                group['lr'] = warm_lr
def log_str(info):
    # Thin logging hook: currently just prints, kept as a single place to
    # redirect training logs later (e.g. to a file).
    print(info)
def reset_bn(model, momentum=args.bn_momentum):
    '''
    Reset bn momentum for every BatchNorm1d/BatchNorm2d in the model.

    NOTE: the default is `args.bn_momentum`, evaluated once at module
    import time from the parsed CLI arguments.
    '''
    for m in model.modules():
        if isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d):
            m.momentum = momentum
def save_config(args, _cfg):
    '''
    Save configs currently using, along with the checkpoints, etc.

    :param args: parsed CLI namespace; its __dict__ is written to
        <save_dir>/args.txt as JSON
    :param _cfg: dict loaded from the JSON config file; written to
        <save_dir>/configs.txt as JSON
    '''
    # Robustness: create the output directory if it does not exist yet
    # (the original assumed something else had already created it).
    os.makedirs(args.save_dir, exist_ok=True)
    f1 = os.path.join(args.save_dir, 'args.txt')
    f2 = os.path.join(args.save_dir, 'configs.txt')
    with open(f1, 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    with open(f2, 'w') as f:
        json.dump(_cfg, f, indent=2)
def load_checkpoint(model, filename):
    """Restore model weights from a saved checkpoint.

    Args:
        model: module whose ``load_state_dict`` receives the checkpoint's
            ``model_state`` entry.
        filename: path to a checkpoint containing ``epoch`` and
            ``model_state`` keys.

    Returns:
        The ``epoch`` value stored in the checkpoint (used to resume).

    Raises:
        FileNotFoundError: if ``filename`` does not exist. The path is now
            carried in the exception (the original raised a bare
            ``FileNotFoundError``).
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    print("==> Loading from checkpoint %s" % filename)
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state'])
    print("==> Done")
    return epoch
class CrossEntropyLossWithWeights(torch.nn.Module):
    """Per-point cross entropy scaled by per-point sample weights.

    Uses the module-level ``NUM_CLASSES`` constant and moves targets and
    weights to the GPU inside ``forward``.
    """

    def __init__(self):
        super().__init__()
        # 'none' reduction keeps a per-point loss vector so it can be
        # multiplied by the weights before averaging.
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')

    def forward(self, predict, target, weights):
        """
        :param predict: (B,N,C) raw logits
        :param target: (B,N) integer class labels
        :param weights: (B,N) per-point sample weights
        :return: scalar mean of the weighted per-point losses
        """
        predict = predict.view(-1, NUM_CLASSES).contiguous()  # B*N, C
        target = target.view(-1).contiguous().cuda().long()  # B*N
        weights = weights.view(-1).contiguous().cuda().float()  # B*N
        loss = self.cross_entropy_loss(predict, target)  # B*N
        loss *= weights
        loss = torch.mean(loss)
        return loss
def train_one_epoch(model, dst_loader, optimizer, epoch, tb_log, warmup=None):
    """Train for one epoch with gradient accumulation and optional warm-up.

    The optimizer steps once every ``args.accum_steps`` mini-batches;
    during the first ``args.warmup_epochs`` epochs the ``warmup`` schedule
    is advanced after each optimizer step. Uses module-level globals
    ``args`` and ``NUM_POINTS``.

    :param warmup: WarmStart instance (required while epoch <= warmup_epochs)
    """
    model.train()
    loss_func = CrossEntropyLossWithWeights()
    optimizer.zero_grad()
    loss_list = []
    total_correct = 0
    total_seen = 0
    start_time = time.time()
    for it, batch in enumerate(dst_loader):
        point_set, semantic_seg, sample_weight = batch
        point_set = point_set.cuda().float()
        predict = model(point_set)  # B,N,C
        loss = loss_func(predict, semantic_seg, sample_weight)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        # Step only at accumulation-window boundaries.
        if (it + 1) % args.accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
            if epoch <= args.warmup_epochs:
                warmup.step()
        # 1. loss
        loss_list.append(loss.item())
        # 2. accuracy
        predict = torch.argmax(predict, dim=2).cpu().numpy()  # B,N
        semantic_seg = semantic_seg.numpy()
        correct = np.sum(predict == semantic_seg)
        batch_seen = predict.shape[0] * NUM_POINTS
        total_correct += correct
        total_seen += batch_seen
        if (it + 1) % 100 == 0:
            time_cost = time.time() - start_time
            log_str(' -- batch: {}/{} -- '.format(it + 1, len(dst_loader)))
            log_str('accuracy: {:.4f}'.format(total_correct / total_seen))
            log_str('mean loss: {:.4f}'.format(np.mean(loss_list)))
            log_str('time cost: {:.2f}'.format(time_cost))
            start_time = time.time()
            # Global iteration index for per-iteration tensorboard curves.
            iternum = epoch * len(dst_loader) + it + 1
            tb_log.log_value('Train/IterAcc', total_correct /
                             total_seen, iternum)
            tb_log.log_value('Train/IterLoss', np.mean(loss_list), iternum)
            tb_log.log_value('Train/Learning rate',
                             optimizer.param_groups[0]['lr'], iternum)
    log_str(' -- epoch accuracy: {:.4f}'.format(total_correct / total_seen))
    log_str(' -- epoch mean loss: {:.4f}'.format(np.mean(loss_list)))
    tb_log.log_value('Train/epoch oA', total_correct / total_seen, epoch)
    tb_log.log_value('Train/epoch loss', np.mean(loss_list), epoch)
    lr = optimizer.param_groups[0]['lr']
    tb_log.log_value('Learning rate', lr, epoch)
def eval_one_epoch(model, dst_loader, epoch, tb_log):
    """Run one evaluation pass and log loss / accuracy / IoU.

    Uses module-level globals ``NUM_CLASSES``, ``saver`` and ``log_str``.
    Checkpoints the model when the eval loss explodes (> 20) so the
    diverged weights can be inspected.

    :param model: network producing (B, N, C) logits
    :param dst_loader: loader yielding (points, labels, sample_weights)
    :param epoch: current epoch index (tensorboard step)
    :param tb_log: tensorboard_logger with ``log_value``
    :return: mean IoU over all NUM_CLASSES classes
    """
    model.eval()
    loss_func = CrossEntropyLossWithWeights()
    total_correct = 0
    total_seen = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]
    loss_list = []
    with torch.no_grad():
        for it, batch in enumerate(dst_loader):
            batch_data, batch_label, batch_smpw = batch
            batch_data = batch_data.cuda().float()
            pred_val = model(batch_data)  # B,N,C
            loss = loss_func(pred_val, batch_label, batch_smpw)
            loss_list.append(loss.item())
            pred_val = torch.argmax(pred_val, dim=2).cpu().numpy()  # B,N
            batch_label = batch_label.numpy()
            batch_smpw = batch_smpw.numpy()
            # NOTE(review): unlike total_seen, this count is not masked by
            # smpw/label validity — kept as-is to preserve the metric.
            correct = np.sum((pred_val == batch_label))
            total_correct += correct
            total_seen += np.sum((batch_label >= 0) & (batch_smpw > 0))
            for l in range(NUM_CLASSES):
                total_seen_class[l] += np.sum((batch_label == l)
                                              & (batch_smpw > 0))
                total_correct_class[l] += np.sum((pred_val == l)
                                                 & (batch_label == l) & (batch_smpw > 0))
                total_iou_deno_class[l] += np.sum(
                    ((pred_val == l) | (batch_label == l)) & (batch_smpw > 0))
    # BUG FIX: the epsilon must be added to the DENOMINATOR before dividing.
    # The original computed np.array(correct / deno + 1e-6), so classes with
    # an empty union divided 0/0 and produced nan, corrupting the mean IoU.
    # (This now matches the ScanNet training script.)
    IoU = np.array(total_correct_class) / \
        (np.array(total_iou_deno_class, dtype=np.float) + 1e-6)
    avg_acc = np.mean(np.array(total_correct_class) /
                      (np.array(total_seen_class, dtype=np.float) + 1e-6))
    if np.mean(loss_list) > 20:
        # An exploding eval loss usually means training diverged; keep the
        # weights around for post-mortem analysis.
        saver.save_checkpoint(
            model, epoch, 'loss_explosion_epoch_{}'.format(epoch))
    log_str('eval point avg class IoU: %f' % (np.mean(IoU)))
    IoU_Class = 'Each Class IoU:::\n'
    for i in range(IoU.shape[0]):
        log_str('Class %d : %.4f' % (i + 1, IoU[i]))
    log_str('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_str('eval avg class acc: %f' % (avg_acc))
    tb_log.log_value('Eval/mIoU', np.mean(IoU), epoch)
    tb_log.log_value('Eval/oA', total_correct / float(total_seen), epoch)
    tb_log.log_value('Eval/mA', avg_acc, epoch)
    tb_log.log_value('Eval/Loss', np.mean(loss_list), epoch)
    return np.mean(IoU)
def train(model, train_loader, eval_loader, tb_log, resume_epoch=0):
    """Main training loop: linear warm-up, then cosine-annealed SGD.

    When resuming, already-trained epochs are skipped; the resume epoch
    itself is evaluated once before training continues. Checkpoints are
    written (via the module-level ``saver``) whenever eval mIoU improves.

    :param resume_epoch: first epoch to actually train (0 = from scratch)
    """
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.98,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
    # Warm-up steps are counted in optimizer steps, hence the division by
    # the gradient-accumulation window size.
    warmup_steps = int(args.warmup_epochs * len(train_loader) / args.accum_steps)
    warmup = WarmStart(optimizer, warmup_steps, args.lr)
    best_miou = 0
    best_epoch = 0
    for epoch in range(args.epochs):
        # Fast-forward past finished epochs, then run one evaluation at
        # the resume point before training resumes.
        if epoch < resume_epoch:
            continue
        elif resume_epoch > 0:
            log_str('====== resume epoch {} ======'.format(epoch))
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
            log_str(' === Best mIoU: {}, epoch {}. === '.format(
                best_miou, best_epoch))
            # Cosine schedule only kicks in after the warm-up phase.
            if epoch >= args.warmup_epochs:
                lr_scheduler.step(epoch)
            resume_epoch = 0
            continue
        log_str('====== epoch {} ======'.format(epoch))
        train_one_epoch(model, train_loader, optimizer, epoch, tb_log, warmup)
        if epoch >= args.warmup_epochs:
            lr_scheduler.step(epoch)
        # Periodic evaluation; denser (every 2 epochs) late in the run.
        if (epoch >= args.start_eval_epoch and epoch % args.eval_freq == 0) or \
           (epoch > 80 and epoch % 2 == 0):
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
                saver.save_checkpoint(
                    model, epoch, 'pn2_best_epoch_{}'.format(epoch))
            log_str(' === Best mIoU: {}, epoch {}. === '.format(
                best_miou, best_epoch))
if __name__ == '__main__':
    # Snapshot the CLI args and JSON config next to the checkpoints.
    save_config(args, _cfg)
    MODEL = importlib.import_module('models.' + args.model)
    # S3DIS points carry xyz + 6 extra channels (rgb + normalized coords
    # per the dataset's if_normal=True below — confirm in S3DIS dataset).
    input_channels = 6
    print('model input_channel: {}.'.format(input_channels))
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=input_channels)
    # Lower every BatchNorm momentum to args.bn_momentum.
    reset_bn(model)
    # resume: restore weights and the epoch to continue from
    from_epoch = 0
    if args.resume:
        from_epoch = load_checkpoint(model, args.resume)
        print("resume from {}".format(from_epoch))
    model = nn.parallel.DataParallel(model)
    model.cuda()
    # init tb_log
    tb_log.configure(os.path.join(args.save_dir, 'tensorboard'))
    # Sample rate is the num of voting
    eval_dst = S3DIS(split='eval',
                     data_root=_cfg['s3dis_data_root'],
                     num_point=args.num_pts,
                     test_area=args.test_area,
                     block_size=args.block_size,
                     sample_rate=args.sample_rate_eval,
                     transform=None,
                     if_normal=True)
    eval_loader = DataLoader(eval_dst, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
    train_dst = S3DIS(split='train',
                      data_root=_cfg['s3dis_data_root'],
                      num_point=args.num_pts,
                      test_area=args.test_area,
                      block_size=args.block_size,
                      sample_rate=args.sample_rate_train,
                      transform=None,
                      if_normal=True)
    train_loader = DataLoader(train_dst, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    # train model
    train(model, train_loader, eval_loader, tb_log, from_epoch)
| 12,945 | 35.162011 | 125 | py |
FPConv | FPConv-master/tools/vis_scannet.py | import numpy as np
import open3d as o3d
import os
import argparse
import sys
parser = argparse.ArgumentParser(description='Visualize point cloud')
parser.add_argument('--file_dir', type=str, help=None)
parser.add_argument('--scene_id', type=str, help=None)
args = parser.parse_args()
print(args)
color_map = [[ 0, 0, 0], # unlabeled (white)
[190, 153, 112], # wall
[189, 198, 255], # floor
[213, 255, 0], # cabinet
[158, 0, 142], # bed
[152, 255, 82], # chair
[119, 77, 0], # sofa
[122, 71, 130], # table
[ 0, 174, 126], # door
[ 0, 125, 181], # window
[ 0, 143, 156], # bookshelf
[107, 104, 130], # picture
[255, 229, 2], # counter
[ 1, 255, 254], # desk
[255, 166, 254], # curtain
[232, 94, 190], # refridgerator
[ 0, 100, 1], # shower curtain
[133, 169, 0], # toilet
[149, 0, 58], # sink
[187, 136, 0], # bathtub
[ 0, 0, 255]] # otherfurniture (blue)
color_map = np.array(color_map)
def vis(tag):
    """Open an Open3D window showing the scene colored according to `tag`.

    :param tag: 'points' shows the raw RGB scan; 'labels' colors points by
        ground-truth class via `color_map`; 'preds' colors by the argmax of
        the saved per-point logits.
    Reads <file_dir>/<scene_id>_{points,labels,preds}.npy (side effect: I/O
    plus a temporary text file that is deleted afterwards).
    """
    file_path = os.path.join(args.file_dir, args.scene_id)
    xyzrgb = np.load(file_path + '_points.npy')
    if tag == 'labels':
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # np.int64 keeps the old behavior for label indexing.
        points = np.load(file_path + '_labels.npy').astype(np.int64)
        points = color_map[points, :]
        xyzrgb[:, 3:] = points
    elif tag == 'preds':
        points = np.load(file_path + '_preds.npy')
        points = np.argmax(points, axis=1)  # logits -> class ids
        points = color_map[points, :]
        xyzrgb[:, 3:] = points
    xyzrgb[:, 3:] /= 255  # Open3D expects colors in [0, 1]
    # Round-trip through a temporary text file so Open3D parses the
    # interleaved xyzrgb layout; removed right after loading.
    cache_file = os.path.join(args.file_dir, args.scene_id + '_vis_cached_file.txt')
    np.savetxt(cache_file, xyzrgb)
    pcd = o3d.io.read_point_cloud(cache_file, format='xyzrgb')
    os.remove(cache_file)
    o3d.visualization.draw_geometries([pcd])
if __name__ == '__main__':
    # Show the raw colored scan, then ground-truth labels, then predictions,
    # one Open3D window after the other (each blocks until closed).
    vis('points')
    vis('labels')
    vis('preds')
| 2,015 | 31.516129 | 84 | py |
FPConv | FPConv-master/models/fpcnn_scannet.py | import torch
import torch.nn as nn
from fpconv.pointnet2.pointnet2_modules import PointnetFPModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock
# Architecture hyper-parameters for the ScanNet model (4 encoder stages).
NPOINT = 8192  # default number of input points per sample
NPOINTS = [NPOINT // 2, NPOINT // 8, NPOINT // 32, NPOINT // 128]  # FPS sizes per stage
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]  # ball-query radii per stage (meters)
NSAMPLE = [32, 32, 32, 32, 16]  # neighbors grouped per query point, per stage
MLPS = [[32,32], [64,64], [128,128], [256,256], [512,512]]  # encoder channel widths
FP_MLPS = [[64,64], [128,64], [256,128], [512,256]]  # decoder (feature propagation) widths
CLS_FC = [64]  # hidden widths of the per-point classification head
DP_RATIO = 0.5  # dropout probability in the head
def get_model(num_class, input_channels=3, num_pts=None):
    """Build an FPCNN segmentation network for ScanNet.

    :param num_class: number of semantic classes to predict per point.
    :param input_channels: per-point feature channels (xyz excluded).
    :param num_pts: input point count; defaults to the module-level NPOINT.
    """
    point_count = NPOINT if num_pts is None else num_pts
    return FPCNN_ScanNet(point_count, num_class, input_channels)
class FPCNN_ScanNet(nn.Module):
    """FPConv encoder/decoder for per-point semantic segmentation on ScanNet.

    Encoder: an initial full-resolution FPConv block (`conv0`) followed by
    four downsampling AssemRes stages (6x6 flat-projection kernels for the
    two high-resolution stages, 4x4 for the two coarse stages).
    Decoder: PointNet++ feature-propagation modules with skip connections,
    then a small shared-MLP classification head with dropout.
    """

    def __init__(self, num_pts, num_class, input_channels, use_xyz=False):
        # input_channels: input feature channels (not include xyz)
        super().__init__()
        # Derive per-stage FPS sizes from the actual input point count
        # (shadows the module-level constants on purpose).
        NPOINT = num_pts
        NPOINTS = [NPOINT // 2, NPOINT // 8, NPOINT // 32, NPOINT // 128]
        print(NPOINTS)
        self.SA_modules = nn.ModuleList()
        # Full-resolution stem: npoint=None keeps the point cloud size.
        self.conv0 = AssemRes_BaseBlock(
            CONV_BASE=FPConv6x6_BaseBlock,
            npoint=None,
            radius=RADIUS[0],
            nsample=NSAMPLE[0],
            channel_list=[input_channels] + MLPS[0],
            use_xyz=use_xyz)
        channel_in = MLPS[0][-1]
        # Track per-stage output widths for the decoder's skip connections.
        skip_channel_list = [channel_in]
        for k in range(NPOINTS.__len__()):
            # Prepend the incoming width to this stage's MLP spec.
            mlps = [MLPS[k+1].copy()]
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            print(mlps[0], RADIUS[k], RADIUS[k+1])
            if k < 2:
                # High-resolution stages use the larger 6x6 projection grid.
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv6x6_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            else:
                # Coarse stages use the cheaper 4x4 projection grid.
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv4x4_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        # Decoder: feature propagation with skip connections (coarse -> fine).
        self.FP_modules = nn.ModuleList()
        for k in range(FP_MLPS.__len__()):
            pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
            mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
            print(mlp)
            self.FP_modules.append(PointnetFPModule(mlp=mlp))
        # Per-point classification head; dropout is inserted after the first conv.
        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(0, CLS_FC.__len__()):
            cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
            pre_channel = CLS_FC[k]
        cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
        cls_layers.insert(1, nn.Dropout(0.5))
        self.cls_layer = nn.Sequential(*cls_layers)

    def _break_up_pc(self, pc):
        # Split a (B, N, 3+C) tensor into xyz (B, N, 3) and channel-first
        # features (B, C, N); features are None when only xyz is present.
        xyz = pc[..., 0:3].contiguous()
        features = (
            pc[..., 3:].transpose(1, 2).contiguous()
            if pc.size(-1) > 3 else None)
        return xyz, features

    def forward(self, pointcloud: torch.cuda.FloatTensor):
        """pointcloud: (B, N, 3+C) -> per-point logits (B, N, num_class)."""
        xyz, features = self._break_up_pc(pointcloud)
        _, features = self.conv0(xyz, features)
        # Encoder pass: collect per-level coordinates and features.
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder pass: propagate features back up, coarsest level first.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])
        fn_feats = l_features[0].unsqueeze(-1) # B, C, N, 1
        pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous() # B, N, C
        return pred_cls
| 4,571 | 37.1 | 94 | py |
FPConv | FPConv-master/models/fpcnn_s3dis.py | import torch
import torch.nn as nn
from fpconv.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock
# Architecture hyper-parameters for the S3DIS model (4 encoder stages).
NPOINTS = [8192, 2048, 512, 128]  # FPS sizes per downsampling stage
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]  # ball-query radii per stage (meters)
NSAMPLE = [32, 32, 32, 32, 16]  # neighbors grouped per query point, per stage
MLPS = [[64,64], [128,128], [256,256], [512,512], [1024,1024]]  # encoder channel widths
FP_MLPS = [[128,128], [256,128], [512,256], [1024,512]]  # decoder (feature propagation) widths
CLS_FC = [128]  # hidden widths of the per-point classification head
DP_RATIO = 0.5  # dropout probability in the head
def get_model(num_class, input_channels=3):
    """Factory: build the FPConv segmentation network for S3DIS.

    :param num_class: number of semantic classes to predict per point.
    :param input_channels: per-point feature channels (xyz excluded).
    """
    model = Pointnet2SSG(num_class, input_channels)
    return model
class Pointnet2SSG(nn.Module):
    """FPConv encoder/decoder for per-point semantic segmentation on S3DIS.

    Same topology as the ScanNet variant: a full-resolution FPConv stem
    (`conv0`), four downsampling AssemRes stages (6x6 projection grids for
    the fine stages, 4x4 for the coarse ones), PointNet++ feature
    propagation with skip connections, and a shared-MLP head with dropout.
    Stage sizes come from the module-level NPOINTS/RADIUS/NSAMPLE constants.
    """

    def __init__(self, num_class, input_channels=3, use_xyz=False):
        # input_channels: input feature channels (not include xyz)
        super().__init__()
        print(NPOINTS)
        self.SA_modules = nn.ModuleList()
        # Full-resolution stem: npoint=None keeps the point cloud size.
        self.conv0 = AssemRes_BaseBlock(
            CONV_BASE=FPConv6x6_BaseBlock,
            npoint=None,
            radius=RADIUS[0],
            nsample=NSAMPLE[0],
            channel_list=[input_channels] + MLPS[0],
            use_xyz=use_xyz)
        channel_in = MLPS[0][-1]
        # Track per-stage output widths for the decoder's skip connections.
        skip_channel_list = [channel_in]
        for k in range(NPOINTS.__len__()):
            # Prepend the incoming width to this stage's MLP spec.
            mlps = [MLPS[k+1].copy()]
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            print(mlps[0], RADIUS[k], RADIUS[k+1])
            if k < 2:
                # High-resolution stages use the larger 6x6 projection grid.
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv6x6_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            else:
                # Coarse stages use the cheaper 4x4 projection grid.
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv4x4_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        # Decoder: feature propagation with skip connections (coarse -> fine).
        self.FP_modules = nn.ModuleList()
        for k in range(FP_MLPS.__len__()):
            pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
            mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
            print(mlp)
            self.FP_modules.append(PointnetFPModule(mlp=mlp))
        # Per-point classification head; dropout is inserted after the first conv.
        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(0, CLS_FC.__len__()):
            cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
            pre_channel = CLS_FC[k]
        cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
        cls_layers.insert(1, nn.Dropout(0.5))
        self.cls_layer = nn.Sequential(*cls_layers)

    def _break_up_pc(self, pc):
        # Split a (B, N, 3+C) tensor into xyz (B, N, 3) and channel-first
        # features (B, C, N); features are None when only xyz is present.
        xyz = pc[..., 0:3].contiguous()
        features = (
            pc[..., 3:].transpose(1, 2).contiguous()
            if pc.size(-1) > 3 else None
        )
        return xyz, features

    def forward(self, pointcloud: torch.cuda.FloatTensor):
        """pointcloud: (B, N, 3+C) -> per-point logits (B, N, num_class)."""
        xyz, features = self._break_up_pc(pointcloud)
        _, features = self.conv0(xyz, features)
        # Encoder pass: collect per-level coordinates and features.
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder pass: propagate features back up, coarsest level first.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )
        fn_feats = l_features[0].unsqueeze(-1) # B, C, N, 1
        pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous() # B, N, C
        return pred_cls
FPConv | FPConv-master/fpconv/base.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from fpconv.pointnet2 import pointnet2_utils
from fpconv.pointnet2 import pytorch_utils as pt_utils
relu_alpha = 0.2
class PointNet(nn.Module):
    """Tiny PointNet: a shared MLP over each neighborhood, then max-pooling
    across the neighbor axis to produce one descriptor per query point."""

    def __init__(self, mlp, pool='max', bn=True):
        super().__init__()
        act = nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn, activation=act)

    def forward(self, pcd):
        """Map (B, C, npoint, nsample) -> (B, C_new, npoint, 1)."""
        feats = self.mlp(pcd)
        # Collapse the neighbor dimension with a max over all nsample entries.
        pooled = F.max_pool2d(feats, kernel_size=[1, feats.size(3)])
        return pooled
class ProjWeightModule(nn.Module):
    """Predict soft projection weights that assign each neighbor onto a flat
    map_size x map_size grid, conditioned on local geometry."""

    def __init__(self, mlp_pn, mlp_wts, map_size, bn=True):
        super().__init__()
        grid_cells = map_size ** 2
        # PointNet branch consumes local xyz (3 channels).
        pn_channels = [3] + mlp_pn
        # Weight branch consumes xyz concatenated with the pooled context and
        # emits one weight per grid cell.
        wts_channels = [pn_channels[-1] + 3] + mlp_wts + [grid_cells]
        self.pn_layer = PointNet(pn_channels, bn=bn)
        self.wts_layer = pt_utils.SharedMLP(
            wts_channels,
            bn=bn,
            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True))

    def forward(self, xyz):
        """xyz: (B, 3, npoint, nsample) local coords -> (B, map_len, npoint, nsample)."""
        n_neighbors = xyz.size(3)
        context = self.pn_layer(xyz)                       # (B, C, npoint, 1)
        context = context.expand(-1, -1, -1, n_neighbors)  # broadcast to every neighbor
        fused = torch.cat([xyz, context], dim=1)           # (B, 3+C, npoint, nsample)
        return self.wts_layer(fused)
class PN_Block(nn.Module):
    """Pointwise (1x1) shared MLP over a channel-first point tensor."""

    def __init__(self, in_channel, out_channel, bn=True, activation=True):
        super().__init__()
        act = nn.LeakyReLU(negative_slope=relu_alpha, inplace=True) if activation else None
        self.conv = pt_utils.Conv2d(in_size=in_channel,
                                    out_size=out_channel,
                                    kernel_size=(1, 1),
                                    bn=bn,
                                    activation=act)

    def forward(self, pcd):
        """(B, C_in, npoint) -> (B, C_out, npoint)."""
        # Add a trailing width-1 axis so Conv2d applies, then drop it again.
        expanded = pcd.unsqueeze(-1)
        return self.conv(expanded).squeeze(-1)
class Pooling_Block(nn.Module):
    """Shortcut branch: ball-query max-pooling of features around each center,
    followed by a pointwise conv (PN_Block)."""

    def __init__(self, radius, nsample, in_channel, out_channel, npoint=None, bn=True, activation=True):
        super().__init__()
        self.radius = radius
        self.nsample = nsample
        self.npoint = npoint  # only needed when centers are sampled internally
        self.conv = PN_Block(in_channel, out_channel, bn=bn, activation=activation)

    def forward(self, xyz, feats, new_xyz=None):
        """xyz: (B, N, 3); feats: (B, C_in, N) -> (B, C_out, np).

        When `new_xyz` is omitted, centers are chosen here by farthest point
        sampling (requires self.npoint to be set).
        """
        if new_xyz is None:
            assert self.npoint is not None
            xyz_t = xyz.transpose(1, 2).contiguous()                          # (B, 3, N)
            fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # (B, np)
            centers_t = pointnet2_utils.gather_operation(xyz_t, fps_idx)      # (B, 3, np)
            new_xyz = centers_t.transpose(1, 2).contiguous()                  # (B, np, 3)
        group_idx = pointnet2_utils.ball_query(self.radius, self.nsample, xyz, new_xyz)
        grouped = pointnet2_utils.grouping_operation(feats, group_idx)        # (B, C, np, ns)
        # Max over the neighbor axis, then drop the trailing singleton dim.
        pooled = F.max_pool2d(grouped, kernel_size=[1, self.nsample]).squeeze(-1)
        return self.conv(pooled)
class Resnet_BaseBlock(nn.Module):
    """Bottleneck residual block built around a flat-projection conv (FPCONV).

    Main path: 1x1 conv (relu+bn) -> FPCONV (relu+bn) -> 1x1 conv (bn).
    Shortcut: identity when resolution and channels match; a projection
    1x1 conv when channels differ; pooling + 1x1 conv when downsampling.
    Note: the shortcut attribute is named `sonv0` (apparent typo for
    `conv0`) — kept as-is because renaming would break saved state_dicts.
    """

    def __init__(self, FPCONV,
                 npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        '''
        pcd => 1x1 conv <relu+bn> => tconv <relu+bn> => 1x1 conv <bn>
        shortcut: pcd => (max_pooling) => 1x1 conv <bn> [apply projection shortcut]
        :param npoint: set to None to ignore 'max_pooling'
        :param nsample, radius: params related to grouper
        '''
        super().__init__()
        self.keep_pcd = npoint is None           # True -> no downsampling
        self.is_im = in_channel == out_channel   # True -> identity shortcut possible
        self.mid_channel = out_channel // 2  # <Bottleneck Design Block>
        # Main path, stage 1: channel reduction.
        self.conv1 = PN_Block(in_channel=in_channel,
                              out_channel=self.mid_channel,
                              bn=bn)
        # Main path, stage 2: the actual flat-projection convolution
        # (also performs the downsampling when npoint is set).
        self.conv2 = FPCONV(npoint=npoint,
                            nsample=nsample,
                            radius=radius,
                            in_channel=self.mid_channel,
                            out_channel=self.mid_channel,
                            bn=bn,
                            use_xyz=use_xyz)
        # Main path, stage 3: channel expansion, no activation (applied
        # after the residual sum).
        self.conv3 = PN_Block(in_channel=self.mid_channel,
                              out_channel=out_channel,
                              bn=bn,
                              activation=False)
        # Shortcut: projection conv when channels change at full resolution,
        # pooling + projection when downsampling; identity otherwise.
        if self.keep_pcd and not self.is_im:
            self.sonv0 = PN_Block(in_channel=in_channel,
                                  out_channel=out_channel,
                                  bn=bn,
                                  activation=False)
        elif not self.keep_pcd:
            self.sonv0 = Pooling_Block(radius=radius,
                                       nsample=nsample,
                                       in_channel=in_channel,
                                       out_channel=out_channel,
                                       bn=bn,
                                       activation=False)

    def forward(self, xyz, feats, new_xyz=None):
        """xyz: (B, N, 3); feats: (B, C_in, N) -> (new_xyz, (B, C_out, np))."""
        # A keep-resolution block must not receive a sampling target.
        assert (self.keep_pcd and new_xyz is None) or not self.keep_pcd, 'invalid new_xyz.'
        new_feats = self.conv1(feats)
        new_xyz, new_feats = self.conv2(xyz, new_feats, new_xyz)
        new_feats = self.conv3(new_feats)
        shc_feats = feats
        if self.keep_pcd and not self.is_im: # if in != out, applt an additional projection mlp
            shc_feats = self.sonv0(shc_feats) # mlp
        if not self.keep_pcd: # not keep pcd, apply fpconv with fps
            # Pool the shortcut onto the same centers the main path produced.
            shc_feats = self.sonv0(xyz, feats, new_xyz) # pooling + mlp
        # Residual sum, then the block's final activation.
        new_feats = F.leaky_relu(shc_feats + new_feats, negative_slope=relu_alpha ,inplace=True)
        return new_xyz, new_feats
class AssemRes_BaseBlock(nn.Module):
    """A stage assembled from stacked residual FPConv blocks.

    The first block may downsample (npoint/nsample/radius); every following
    block keeps the (already downsampled) resolution and uses the *_ds
    neighborhood parameters.
    """

    def __init__(self, CONV_BASE,
                 npoint, nsample, radius, channel_list, nsample_ds=None, radius_ds=None, bn=True, use_xyz=False):
        '''
        Apply downsample and conv on input pcd
        :param CONV_BASE: flat-projection conv class used inside each block.
        :param npoint: the number of points to sample (None keeps resolution)
        :param nsample: the number of neighbors to group when conv
        :param radius: radius of ball query to group neighbors
        :param channel_list: List<a, c, c, ...>, the elements from <1> to the last must be the same
        :param nsample_ds, radius_ds: neighborhood parameters for the
            keep-resolution blocks after the first; default to nsample/radius.
        '''
        super().__init__()
        if nsample_ds is None:
            nsample_ds = nsample
        if radius_ds is None:
            radius_ds = radius
        self.conv_blocks = nn.ModuleList()
        for i in range(len(channel_list) - 1):
            in_channel = channel_list[i]
            out_channel = channel_list[i+1]
            # Only block 0 downsamples; the rest run at the new resolution.
            self.conv_blocks.append(Resnet_BaseBlock(FPCONV=CONV_BASE,
                                                     npoint=npoint if i == 0 else None,
                                                     nsample=nsample if i == 0 else nsample_ds,
                                                     radius=radius if i == 0 else radius_ds,
                                                     in_channel=in_channel,
                                                     out_channel=out_channel,
                                                     bn=bn,
                                                     use_xyz=use_xyz))

    def forward(self, xyz, feats, new_xyz=None):
        """xyz: (B, N, 3); feats: (B, C, N) -> (xyz', feats') after all blocks."""
        for i, block in enumerate(self.conv_blocks):
            # BUGFIX: only the first block may consume an externally supplied
            # `new_xyz`. Blocks after it are keep-resolution and assert that
            # new_xyz is None, so forwarding a non-None target to them raised
            # an AssertionError whenever a caller passed new_xyz.
            xyz, feats = block(xyz, feats, new_xyz if i == 0 else None)
        return xyz, feats
| 8,204 | 40.649746 | 125 | py |
FPConv | FPConv-master/fpconv/fpconv.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from fpconv.pointnet2 import pointnet2_utils
from fpconv.pointnet2 import pytorch_utils as pt_utils
from fpconv import base
relu_alpha = 0.2
class FPConv4x4_BaseBlock(nn.Module):
    """Flat-projection convolution over a 4x4 virtual grid.

    Each query point's neighborhood is softly projected onto a 4x4 (=16 cell)
    planar grid via learned weights, then a (16,1) conv aggregates the grid
    into the output feature. Optionally downsamples with FPS when `npoint`
    is set; `npoint=None` keeps the input resolution.
    """

    def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        super().__init__()
        print('fpconv4x4 init:', npoint, nsample, radius, in_channel, out_channel)
        self.npoint = npoint
        self.nsample = nsample
        self.keep_pcd = npoint is None
        self.use_xyz = use_xyz
        self.grouper = pointnet2_utils.QueryAndGroupLocal(radius, nsample)
        # Predicts, per neighbor, a soft assignment onto the 16 grid cells.
        self.wts_layer = base.ProjWeightModule(mlp_pn=[8,16], mlp_wts=[16], map_size=4, bn=bn)
        if use_xyz:
            in_channel += 3
        # (16,1) kernel: convolves across all 16 grid cells at once.
        self.proj_conv = pt_utils.Conv2d(in_size=in_channel,
                                         out_size=out_channel,
                                         kernel_size=(16,1),
                                         bn=bn,
                                         activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True))

    def forward(self, xyz, features, new_xyz=None):
        '''
        :param xyz: B,N,3
        :param features: B,C,N
        :param new_xyz: optional pre-sampled centers, B,np,3
        :returns:
            new_xyz: B,np,3
            new_feats: B,C,np
        '''
        # sample new xyz (FPS) unless centers are supplied or resolution is kept
        if not self.keep_pcd and new_xyz is None:
            xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous() # B,npoint,3
        elif new_xyz is not None:
            self.npoint = new_xyz.size(1)
        else: # keep pcd
            new_xyz = xyz
            self.npoint = new_xyz.size(1)
        # get distribution vector: group neighbors and predict grid weights
        grouped_xyz, grouped_feats = self.grouper(xyz, new_xyz, features)
        proj_wts = self.wts_layer(grouped_xyz) # B,ml,np,ns
        if self.use_xyz:
            grouped_feats = torch.cat([grouped_xyz, grouped_feats], dim=1)
        # normalize weights
        # normalize at dim 1 <ml>: unit L2 norm over the grid cells
        proj_wts2_ = proj_wts ** 2 # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=1, keepdim=True) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda()) # avoid div by 0
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts = proj_wts / proj_wts_sum
        # normalize at dim 3 <nsample>: divide by neighbor-axis norm, but only
        # when that norm exceeds 1 (clamped below at 1.0)
        proj_wts_sum = torch.sum(proj_wts2_, dim=3, keepdim=True) # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, ml, np, 1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1.0).cuda())
        proj_wts = proj_wts / proj_wts_sum # B,ml,np,ns
        # projection: weighted scatter of neighbor features onto the grid
        proj_wts = proj_wts.transpose(1,2) # B, np, ml, ns
        grouped_feats = grouped_feats.permute(0, 2, 3, 1) # B, C, np, ns => B, np, ns, C
        # BUGFIX: removed a dead `multi = proj_wts.matmul(grouped_feats)` line
        # that duplicated this matmul without ever using the result.
        proj_feats = F.leaky_relu(proj_wts.matmul(grouped_feats), negative_slope=relu_alpha, inplace=True) # B, np, ml, C
        proj_feats = proj_feats.transpose(1,3) # B, C, ml, np
        # convolution over the flattened grid
        proj_feats = self.proj_conv(proj_feats) # B, C_new, 1, np
        proj_feats = proj_feats.squeeze(2) # B, C_new, np
        return new_xyz, proj_feats
class FPConv6x6_BaseBlock(nn.Module):
    """Flat-projection convolution over a 6x6 virtual grid.

    Neighborhoods are softly projected onto a 6x6 (=36 cell) planar grid via
    learned weights plus a learned bias, then a stack of 3D convs
    (3x3 -> 3x3 -> 2x2 over the grid, i.e. 6 -> 4 -> 2 -> 1) aggregates the
    grid into the output feature. Optionally downsamples with FPS when
    `npoint` is set; `npoint=None` keeps the input resolution.
    """

    def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        super().__init__()
        print('fpconv6x6 init:', npoint, nsample, radius, in_channel, out_channel)
        self.npoint = npoint
        self.map_size = 6
        self.map_len = self.map_size ** 2  # 36 grid cells
        self.nsample = nsample
        self.keep_pcd = npoint is None
        self.use_xyz = use_xyz
        self.grouper = pointnet2_utils.QueryAndGroupLocal(radius, nsample)
        # Predicts, per neighbor, a soft assignment onto the 36 grid cells.
        self.wts_layer = base.ProjWeightModule(mlp_pn=[8,16,16], mlp_wts=[16,32], map_size=6, bn=bn)
        if use_xyz:
            in_channel += 3
        # Learned additive bias applied to the projected grid features.
        self.bias = Parameter(torch.Tensor(in_channel))
        mid_channel = in_channel
        # NOTE(review): the second Conv3d uses in_size=in_channel rather than
        # mid_channel; harmless only because mid_channel == in_channel here —
        # confirm before changing mid_channel.
        self.proj_conv = nn.Sequential(
            pt_utils.Conv3d(in_size=in_channel,
                            out_size=mid_channel,
                            kernel_size=(3,3,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)),
            pt_utils.Conv3d(in_size=in_channel,
                            out_size=mid_channel,
                            kernel_size=(3,3,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)),
            pt_utils.Conv3d(in_size=mid_channel,
                            out_size=out_channel,
                            kernel_size=(2,2,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)))
        self.reset_parameters()

    def reset_parameters(self):
        # Small negative bias so empty grid cells start slightly inhibited.
        nn.init.constant_(self.bias, -0.05)

    def forward(self, xyz, features, new_xyz=None):
        '''
        :param xyz: B,N,3
        :param features: B,C,N
        :param new_xyz: optional pre-sampled centers, B,np,3
        :returns:
            new_xyz: B,np,3
            new_feats: B,C,np
        '''
        # sample new xyz (FPS) unless centers are supplied or resolution is kept
        if not self.keep_pcd and new_xyz is None:
            xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous() # B,npoint,3
        elif new_xyz is not None:
            idx = None
            self.npoint = new_xyz.size(1)
        else:
            idx = None
            new_xyz = xyz
            self.npoint = new_xyz.size(1)
        # get distribution vector: group neighbors and predict grid weights
        grouped_xyz, grouped_feats = self.grouper(xyz, new_xyz, features)
        proj_wts = self.wts_layer(grouped_xyz) # B,ml,np,ns
        if self.use_xyz:
            grouped_feats = torch.cat([grouped_xyz, grouped_feats], dim=1)
        # normalize weights
        # normalize at dim 1 <ml>: unit L2 norm over the grid cells
        proj_wts2_ = proj_wts ** 2 # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=1, keepdim=True) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda()) # avoid div by 0
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts = proj_wts / proj_wts_sum
        # normalize at dim 3 <nsample>: divide by neighbor-axis norm, but only
        # when that norm exceeds 1 (clamped below at 1.0). Uses the squares of
        # the *pre-normalization* weights, same as the 4x4 variant.
        # proj_wts2_ = proj_wts ** 2 # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=3, keepdim=True) # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, ml, np, 1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1.0).cuda())
        proj_wts = proj_wts / proj_wts_sum # B,ml,np,ns
        # projection: weighted scatter of neighbor features onto the grid
        proj_wts = proj_wts.transpose(1,2) # B, np, ml, ns
        grouped_feats = grouped_feats.permute(0, 2, 3, 1) # B, C, np, ns => B, np, ns, C
        proj_feats = F.leaky_relu(proj_wts.matmul(grouped_feats) + self.bias, negative_slope=relu_alpha, inplace=True) # B, np, ml, C
        # reshape projection features # B, np, ml, C => B, C, ms, ms, np
        bs = proj_feats.size(0)
        proj_feats = proj_feats.transpose(1, 3) # B, C, ml, np
        proj_feats = proj_feats.view(bs, -1, self.map_size, self.map_size, self.npoint).contiguous() # B, C, ms, ms, np
        # convolution: 6x6 -> 4x4 -> 2x2 -> 1x1 over the grid axes
        proj_feats = self.proj_conv(proj_feats) # B, C_new, 1, 1, np
        proj_feats = proj_feats.squeeze(3).squeeze(2) # B, C_new, np
        return new_xyz, proj_feats
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.