id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
4,580 | import re
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from exceptions import (
SecretNameAlreadyExistsException,
SecretNameInvalidException,
SecretNoSetPermissionException,
)
# Vault URL for the key vault this module manages.
# NOTE(review): depends on a module-level key_vault_name that is not defined
# in this snippet — confirm it is assigned before this line executes.
KVUri = f"https://{key_vault_name}.vault.azure.net"


def get_secret_client(
    tenant_id: str, client_id: str, client_secret: str
) -> "SecretClient":
    """
    Build a Key Vault SecretClient authenticated via a service principal.

    :param tenant_id: Azure AD tenant of the service principal.
    :param client_id: application (client) id.
    :param client_secret: client secret for the application.
    :returns: a SecretClient bound to KVUri.
    """
    # Return annotation is quoted so the def does not fail at import time
    # when the azure SDK names are unavailable.
    credential = ClientSecretCredential(tenant_id, client_id, client_secret)
    client = SecretClient(vault_url=KVUri, credential=credential)
    return client
4,581 | import re
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from exceptions import (
SecretNameAlreadyExistsException,
SecretNameInvalidException,
SecretNoSetPermissionException,
)
def get_secret(secret_name: str, client: "SecretClient"):
    """
    Fetch a secret's plaintext value from Key Vault.

    :param secret_name: name of the secret to read.
    :param client: an authenticated SecretClient (annotation quoted so the
        def does not fail when azure imports are unavailable).
    :returns: the secret's value.
    """
    secret = client.get_secret(secret_name)
    return secret.value
4,582 | import re
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from exceptions import (
SecretNameAlreadyExistsException,
SecretNameInvalidException,
SecretNoSetPermissionException,
)
# Secret names that must never be (re)used; currently none.
reserved_secret_names = []


def validate_secret_name(secret_name: str):
    """
    Validate a Key Vault secret name.

    :param secret_name: candidate secret name.
    :raises SecretNameInvalidException: if the name contains anything besides
        alphanumeric characters and dashes.
    :raises SecretNameAlreadyExistsException: if the name is reserved.
    """
    # Check if secret name is valid. Secret name can only contain
    # alphanumeric characters and dashes.
    pattern = "^[a-zA-Z0-9-]+$"
    if not re.match(pattern, secret_name):
        raise SecretNameInvalidException(
            "Secret name can only contain alphanumeric characters and dashes"
        )
    # Check if secret name is one of the reserved names
    if secret_name in reserved_secret_names:
        raise SecretNameAlreadyExistsException(
            f"Secret name {secret_name} already exists"
        )
4,583 | import re
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from exceptions import (
SecretNameAlreadyExistsException,
SecretNameInvalidException,
SecretNoSetPermissionException,
)
# Key vault this helper targets; used in the error message below.
key_vault_name = "github-promptflow"


def upload_secret(client: "SecretClient", secret_name: str, secret_value: str):
    """
    Store *secret_value* under *secret_name*, translating known service
    errors into the repo's exception types.

    :raises SecretNameAlreadyExistsException: the name is soft-deleted and
        cannot be reused.
    :raises SecretNoSetPermissionException: the principal lacks "set"
        permission on the vault.
    """
    try:
        client.set_secret(secret_name, secret_value)
    except ResourceExistsError as ex:
        if "in a deleted but recoverable state" not in str(ex):
            # Previously this case was silently swallowed and "Done." was
            # still printed; surface the original error instead.
            raise
        raise SecretNameAlreadyExistsException(
            f"Secret name {secret_name} is deleted but recoverable, and its name cannot be reused"
        ) from ex
    except HttpResponseError as ex:
        if (
            ex.status_code == 403
            and "does not have secrets set permission on key vault" in str(ex)
        ):
            raise SecretNoSetPermissionException(
                f"No set permission on key vault {key_vault_name}"
            ) from ex
        raise  # other HTTP failures were silently swallowed before
    print("Done.")
4,584 | import argparse
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.readme_step import ReadmeStepsManage
# Parse a readme file into the runnable text collected by the pandoc filter.
# NOTE(review): depends on pypandoc, io, panflute and the action/prepare
# filter callbacks, none of which are imported in this snippet — confirm
# they exist in the real module. Also shadows the readme_parser imported
# above from ghactions_driver.readme_parse.
def readme_parser(filename: str):
# filename is relative to the repo root resolved via ReadmeStepsManage.
real_filename = Path(ReadmeStepsManage.git_base_dir()) / filename
# Convert the markdown file to a pandoc JSON AST string.
data = pypandoc.convert_file(str(real_filename), "json")
f = io.StringIO(data)
doc = panflute.load(f)
# Run the module's filters over the AST; they accumulate doc.full_text.
panflute.run_filter(action, prepare, doc=doc)
return doc.full_text
class ReadmeStepsManage:
    """
    Static methods for managing all readme steps.
    """

    # Cached repo root path; "" means "not resolved yet".
    repo_base_dir = ""

    @staticmethod
    def git_base_dir() -> str:
        """
        Get the base directory of the git repo (cached after the first call).
        """
        if ReadmeStepsManage.repo_base_dir == "":
            try:
                ReadmeStepsManage.repo_base_dir = (
                    subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
                    .decode("utf-8")
                    .strip()
                )
                # NOTE(review): this raise is unconditional, so the except
                # branch always runs and the root is derived from __file__
                # even inside a git checkout — confirm this is intended.
                raise Exception("Not in git repo")
            except Exception:
                ReadmeStepsManage.repo_base_dir = (
                    Path(__file__).parent.parent.parent.parent.resolve()
                )
            print(ReadmeStepsManage.repo_base_dir)
        return ReadmeStepsManage.repo_base_dir

    @staticmethod
    def write_workflow(
        workflow_name: str, pipeline_name: str, output_telemetry=None
    ) -> None:
        """
        Render the GitHub Actions workflow for *workflow_name* and record the
        results on *output_telemetry*.

        :param workflow_name: target workflow (also the output yml stem).
        :param pipeline_name: CI name substituted into the template.
        :param output_telemetry: telemetry sink; a fresh Telemetry is created
            per call (the original used one shared mutable default instance).
        """
        if output_telemetry is None:
            output_telemetry = Telemetry()
        # Schedule notebooks at different times to reduce maximum quota usage.
        name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
        schedule_minute = name_hash % 60
        schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC
        if "tutorials" in workflow_name:
            # markdown filename has some exceptions, special handle here
            if "chat_with_pdf" in workflow_name:
                readme_name = "chat-with-pdf.md"
            elif (
                "fine_tuning_evaluation_promptflow_quality_improvement"
                in workflow_name
            ):
                readme_name = "promptflow-quality-improvement.md"
            else:
                readme_name = "README.md"
            readme_path = (
                Path(ReadmeStepsManage.git_base_dir())
                / ReadmeSteps.working_dir
                / readme_name
            )
            # local import to avoid circular import
            from .resource_resolver import resolve_tutorial_resource

            path_filter = resolve_tutorial_resource(
                workflow_name, readme_path.resolve()
            )
        else:
            if (
                "flow_with_additional_includes" in workflow_name
                or "flow_with_symlinks" in workflow_name
            ):
                # these two flows have dependencies on flow web-classification
                # so their workflows also listen to changes in web-classification
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + "examples/flows/standard/web-classification/**, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
            else:
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
        replacements = {
            "steps": ReadmeSteps.step_array,
            "workflow_name": workflow_name,
            "ci_name": pipeline_name,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
        }
        workflow_template_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts"
            / "readme"
            / "ghactions_driver"
            / "workflow_templates"
        )
        target_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / ".github"
            / "workflows"
            / f"{workflow_name}.yml"
        )
        template = Environment(
            loader=FileSystemLoader(workflow_template_path.resolve())
        ).get_template(ReadmeSteps.template)
        content = template.render(replacements)
        with open(target_path.resolve(), "w", encoding="utf-8") as f:
            f.write(content)
        print(f"Write readme workflow: {target_path.resolve()}")
        output_telemetry.workflow_name = workflow_name
        output_telemetry.target_path = target_path
        output_telemetry.readme_folder = ReadmeSteps.working_dir
        output_telemetry.readme_name = ReadmeSteps.readme_name
        output_telemetry.path_filter = path_filter
def write_readme_shell(readme_path: str, output_folder: str):
    """
    Render the shell commands extracted from *readme_path* into
    <repo>/<output_folder>/bash_script.sh.
    """
    full_text = readme_parser(readme_path)
    # NOTE(review): this bare call only warms the git_base_dir cache (and
    # prints the root once); its result is discarded — confirm still wanted.
    Path(ReadmeStepsManage.git_base_dir())
    bash_script_path = (
        Path(ReadmeStepsManage.git_base_dir()) / output_folder / "bash_script.sh"
    )
    template_env = Environment(
        loader=FileSystemLoader(
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts/readme/ghactions_driver/bash_script"
        )
    )
    bash_script_template = template_env.get_template("bash_script.sh.jinja2")
    with open(bash_script_path, "w") as f:
        f.write(bash_script_template.render({"command": full_text}))
4,585 | from pathlib import Path
from .readme_step import ReadmeStepsManage, ReadmeSteps
from ghactions_driver.telemetry_obj import Telemetry
class ReadmeSteps:
    """
    Static class to record steps, to be filled in workflow templates and Readme.
    """

    step_array = []  # Recorded steps, in registration order
    readme_name = ""  # Readme name backing the generated workflow
    working_dir = ""  # Working directory of the flow, relative to git_base_dir
    template = ""  # Base template under the workflow_templates folder
    workflow = ""  # Target workflow name to be generated

    # Annotations referencing the Step hierarchy are quoted: those classes
    # are defined elsewhere in the package and evaluating them eagerly would
    # raise NameError at class-creation time if they are not imported.
    @staticmethod
    def remember_step(step: "Step") -> "Step":
        """Record *step* and return it unchanged."""
        ReadmeSteps.step_array.append(step)
        return step

    @staticmethod
    def get_length() -> int:
        """Number of steps recorded so far."""
        return len(ReadmeSteps.step_array)

    # region steps
    @staticmethod
    def create_env() -> "Step":
        return ReadmeSteps.remember_step(CreateEnv())

    @staticmethod
    def create_env_gpt4() -> "Step":
        return ReadmeSteps.remember_step(CreateEnvGPTFour())

    @staticmethod
    def yml_create_aoai(yaml_name: str) -> "Step":
        return ReadmeSteps.remember_step(CreateAoaiFromYaml(yaml_name=yaml_name))

    @staticmethod
    def env_create_aoai(connection_name: str) -> "Step":
        return ReadmeSteps.remember_step(
            CreateAoaiFromEnv(connection_name=connection_name)
        )

    @staticmethod
    def azure_login() -> "Step":
        return ReadmeSteps.remember_step(AzureLoginStep())

    @staticmethod
    def install_dependencies() -> "Step":
        return ReadmeSteps.remember_step(InstallDependenciesStep())

    @staticmethod
    def install_dev_dependencies() -> "Step":
        return ReadmeSteps.remember_step(InstallDevDependenciesStep())

    @staticmethod
    def create_run_yaml() -> "Step":
        return ReadmeSteps.remember_step(CreateRunYaml())

    @staticmethod
    def extract_steps_and_run() -> "Step":
        return ReadmeSteps.remember_step(ExtractStepsAndRun())

    @staticmethod
    def extract_steps_and_run_gpt_four() -> "Step":
        return ReadmeSteps.remember_step(ExtractStepsAndRunGPTFour())

    # endregion steps

    @staticmethod
    def setup_target(
        working_dir: str, template: str, target: str, readme_name: str
    ) -> str:
        """
        Used at the very head of a jinja template to register basic
        information. Returns "" so it can be interpolated without output.
        """
        ReadmeSteps.working_dir = working_dir
        ReadmeSteps.template = template
        ReadmeSteps.workflow = target
        ReadmeSteps.step_array = []
        ReadmeSteps.readme_name = readme_name
        return ""

    @staticmethod
    def cleanup() -> None:
        """Reset all recorded state."""
        ReadmeSteps.working_dir = ""
        ReadmeSteps.template = ""
        ReadmeSteps.workflow = ""
        ReadmeSteps.step_array = []
class ReadmeStepsManage:
    """
    Static methods for managing all readme steps.
    """

    # Cached repo root path; "" means "not resolved yet".
    repo_base_dir = ""

    @staticmethod
    def git_base_dir() -> str:
        """
        Get the base directory of the git repo (cached after the first call).
        """
        if ReadmeStepsManage.repo_base_dir == "":
            try:
                ReadmeStepsManage.repo_base_dir = (
                    subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
                    .decode("utf-8")
                    .strip()
                )
                # NOTE(review): this raise is unconditional, so the except
                # branch always runs and the root is derived from __file__
                # even inside a git checkout — confirm this is intended.
                raise Exception("Not in git repo")
            except Exception:
                ReadmeStepsManage.repo_base_dir = (
                    Path(__file__).parent.parent.parent.parent.resolve()
                )
            print(ReadmeStepsManage.repo_base_dir)
        return ReadmeStepsManage.repo_base_dir

    @staticmethod
    def write_workflow(
        workflow_name: str, pipeline_name: str, output_telemetry=None
    ) -> None:
        """
        Render the GitHub Actions workflow for *workflow_name* and record the
        results on *output_telemetry*.

        :param workflow_name: target workflow (also the output yml stem).
        :param pipeline_name: CI name substituted into the template.
        :param output_telemetry: telemetry sink; a fresh Telemetry is created
            per call (the original used one shared mutable default instance).
        """
        if output_telemetry is None:
            output_telemetry = Telemetry()
        # Schedule notebooks at different times to reduce maximum quota usage.
        name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
        schedule_minute = name_hash % 60
        schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC
        if "tutorials" in workflow_name:
            # markdown filename has some exceptions, special handle here
            if "chat_with_pdf" in workflow_name:
                readme_name = "chat-with-pdf.md"
            elif (
                "fine_tuning_evaluation_promptflow_quality_improvement"
                in workflow_name
            ):
                readme_name = "promptflow-quality-improvement.md"
            else:
                readme_name = "README.md"
            readme_path = (
                Path(ReadmeStepsManage.git_base_dir())
                / ReadmeSteps.working_dir
                / readme_name
            )
            # local import to avoid circular import
            from .resource_resolver import resolve_tutorial_resource

            path_filter = resolve_tutorial_resource(
                workflow_name, readme_path.resolve()
            )
        else:
            if (
                "flow_with_additional_includes" in workflow_name
                or "flow_with_symlinks" in workflow_name
            ):
                # these two flows have dependencies on flow web-classification
                # so their workflows also listen to changes in web-classification
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + "examples/flows/standard/web-classification/**, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
            else:
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
        replacements = {
            "steps": ReadmeSteps.step_array,
            "workflow_name": workflow_name,
            "ci_name": pipeline_name,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
        }
        workflow_template_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts"
            / "readme"
            / "ghactions_driver"
            / "workflow_templates"
        )
        target_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / ".github"
            / "workflows"
            / f"{workflow_name}.yml"
        )
        template = Environment(
            loader=FileSystemLoader(workflow_template_path.resolve())
        ).get_template(ReadmeSteps.template)
        content = template.render(replacements)
        with open(target_path.resolve(), "w", encoding="utf-8") as f:
            f.write(content)
        print(f"Write readme workflow: {target_path.resolve()}")
        output_telemetry.workflow_name = workflow_name
        output_telemetry.target_path = target_path
        output_telemetry.readme_folder = ReadmeSteps.working_dir
        output_telemetry.readme_name = ReadmeSteps.readme_name
        output_telemetry.path_filter = path_filter
class Telemetry:
    """Plain attribute bag; generation code sets arbitrary fields on it."""
def write_readme_workflow(readme_path, output_telemetry=None):
    """
    Register the readme's steps and generate the matching samples workflow.

    :param readme_path: absolute path of the readme inside the repo.
    :param output_telemetry: telemetry sink; created per call (the original
        used a shared mutable default Telemetry() instance).
    """
    if output_telemetry is None:
        output_telemetry = Telemetry()
    relative_path = Path(readme_path).relative_to(
        Path(ReadmeStepsManage.git_base_dir())
    )
    workflow_path = relative_path.parent.as_posix()
    relative_name_path = Path(readme_path).relative_to(
        Path(ReadmeStepsManage.git_base_dir()) / "examples"
    )
    # e.g. flows/standard/web-classification/README.md
    #   -> samples_flows_standard_web_classification
    workflow_name = (
        relative_name_path.as_posix()
        .replace(".md", "")
        .replace("/README", "")
        .replace("/", "_")
        .replace("-", "_")
    )
    workflow_name = "samples_" + workflow_name
    ReadmeSteps.setup_target(
        working_dir=workflow_path,
        template=(
            "basic_workflow_replace_config_json.yml.jinja2"
            if "e2e_development_chat_with_pdf" in workflow_name
            else "basic_workflow_replace.yml.jinja2"
        ),
        target=f"{workflow_name}.yml",
        readme_name=relative_path.as_posix(),
    )
    ReadmeSteps.install_dependencies()
    ReadmeSteps.install_dev_dependencies()
    # GPT-4V samples need their own env and connection; condition hoisted
    # since it is reused for the run step below.
    uses_gpt4v = workflow_name.endswith(
        ("flows_chat_chat_with_image", "flows_standard_describe_image")
    )
    if uses_gpt4v:
        ReadmeSteps.create_env_gpt4()
        ReadmeSteps.env_create_aoai("aoai_gpt4v_connection")
    else:
        ReadmeSteps.create_env()
        if workflow_name.endswith("pdf"):
            ReadmeSteps.env_create_aoai("chat_with_pdf_custom_connection")
    ReadmeSteps.create_run_yaml()
    if workflow_name.endswith(
        (
            "flows_standard_basic_with_builtin_llm",
            "flows_standard_flow_with_symlinks",
            "flows_standard_flow_with_additional_includes",
            "flows_standard_basic_with_connection",
        )
    ):
        ReadmeSteps.yml_create_aoai("examples/connections/azure_openai.yml")
    ReadmeSteps.azure_login()
    if uses_gpt4v:
        ReadmeSteps.extract_steps_and_run_gpt_four()
    else:
        ReadmeSteps.extract_steps_and_run()
    ReadmeStepsManage.write_workflow(
        workflow_name, "samples_readme_ci", output_telemetry
    )
    ReadmeSteps.cleanup()
4,586 | import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry
def format_ipynb(notebooks):
    """Run the black-nb code formatter over *notebooks*, clearing outputs."""
    # Local import keeps this a drop-in replacement without touching the
    # file-level import block.
    import subprocess

    for notebook in notebooks:
        # Argument list instead of os.system(f"...") so paths containing
        # spaces or shell metacharacters are passed through safely.
        subprocess.run(["black-nb", "--clear-output", str(notebook)])
4,587 | import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry
The provided code snippet includes necessary dependencies for implementing the `_get_paths` function. Write a Python function `def _get_paths(paths_list)` to solve the following problem:
Convert the path list to unix format. :param paths_list: The input path list. :returns: The same list with unix-like paths.
Here is the function:
def _get_paths(paths_list):
"""
Convert the path list to unix format.
:param paths_list: The input path list.
:returns: The same list with unix-like paths.
"""
paths_list.sort()
if ntpath.sep == os.path.sep:
return [pth.replace(ntpath.sep, "/") for pth in paths_list]
return paths_list | Convert the path list to unix format. :param paths_list: The input path list. :returns: The same list with unix-like paths. |
4,588 | import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry
def write_notebook_workflow(notebook, name, output_telemetry=None):
    """
    Generate the GitHub Actions workflow for one notebook and record what was
    written on *output_telemetry*.

    :param notebook: repo-relative notebook path (forward slashes).
    :param name: notebook file name without extension.
    :param output_telemetry: telemetry sink; created per call (the original
        used a shared mutable default Telemetry() instance).
    """
    if output_telemetry is None:
        output_telemetry = Telemetry()
    # Workflow name: path components minus tutorials/examples/ipynb, dashes
    # stripped, joined under a "samples" prefix.
    temp_name_list = re.split(r"/|\.", notebook)
    temp_name_list = [
        x
        for x in temp_name_list
        if x != "tutorials" and x != "examples" and x != "ipynb"
    ]
    temp_name_list = [x.replace("-", "") for x in temp_name_list]
    workflow_name = "_".join(["samples"] + temp_name_list)
    place_to_write = (
        Path(ReadmeStepsManage.git_base_dir())
        / ".github"
        / "workflows"
        / f"{workflow_name}.yml"
    )
    gh_working_dir = "/".join(notebook.split("/")[:-1])
    env = Environment(
        loader=FileSystemLoader("./scripts/readme/ghactions_driver/workflow_templates")
    )
    template = env.get_template("basic_workflow.yml.jinja2")
    # Schedule notebooks at different times to reduce maximum quota usage.
    name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
    schedule_minute = name_hash % 60
    schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC
    if "examples/tutorials" in gh_working_dir:
        notebook_path = Path(ReadmeStepsManage.git_base_dir()) / str(notebook)
        path_filter = resolve_tutorial_resource(workflow_name, notebook_path.resolve())
    elif "samples_configuration" in workflow_name:
        # exception, samples configuration is very simple and not related to
        # other prompt flow examples
        path_filter = (
            "[ examples/configuration.ipynb, .github/workflows/samples_configuration.yml ]"
        )
    else:
        path_filter = f"[ {gh_working_dir}/**, examples/*requirements.txt, .github/workflows/{workflow_name}.yml ]"
    # these workflows require config.json to init PF/ML client
    workflows_require_config_json = [
        "configuration",
        "flowinpipeline",
        "quickstartazure",
        "cloudrunmanagement",
    ]
    if any(keyword in workflow_name for keyword in workflows_require_config_json):
        template = env.get_template("workflow_config_json.yml.jinja2")
    elif "chatwithpdf" in workflow_name:
        template = env.get_template("pdf_workflow.yml.jinja2")
    elif "flowasfunction" in workflow_name:
        template = env.get_template("flow_as_function.yml.jinja2")
    content = template.render(
        {
            "workflow_name": workflow_name,
            "ci_name": "samples_notebook_ci",
            "name": name,
            "gh_working_dir": gh_working_dir,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
        }
    )
    # To customize workflow, add new steps in steps.py
    # make another function for special cases.
    with open(place_to_write.resolve(), "w") as f:
        f.write(content)
    print(f"Write workflow: {place_to_write.resolve()}")
    output_telemetry.workflow_name = workflow_name
    output_telemetry.name = name
    output_telemetry.gh_working_dir = gh_working_dir
    output_telemetry.path_filter = path_filter
class Telemetry:
    """Plain attribute bag; generation code sets arbitrary fields on it."""
def write_workflows(notebooks, output_telemetries=None):
    """
    Generate a workflow file for every notebook.

    :param notebooks: iterable of repo-relative notebook paths.
    :param output_telemetries: optional list collecting one Telemetry per
        notebook. Defaults to a fresh list per call — the original used a
        mutable default ([]), which silently accumulated across calls.
    """
    if output_telemetries is None:
        output_telemetries = []
    # process notebooks
    for notebook in notebooks:
        # get notebook name
        output_telemetry = Telemetry()
        nb_path = Path(notebook)
        name, _ = os.path.splitext(nb_path.parts[-1])
        # write workflow file
        write_notebook_workflow(notebook, name, output_telemetry)
        output_telemetry.notebook = nb_path
        output_telemetries.append(output_telemetry)
4,589 | import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry
def local_filter(callback, array):
    """
    JS-style filter: keep items where callback(item, index, array) is truthy.
    """
    return [
        item
        for index, item in enumerate(array)
        if callback(item, index, array)
    ]
4,590 | import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry
The provided code snippet includes necessary dependencies for implementing the `no_readme_generation_filter` function. Write a Python function `def no_readme_generation_filter(item, index, array) -> bool` to solve the following problem:
Set each ipynb metadata no_readme_generation to "true" to skip readme generation
Here is the function:
def no_readme_generation_filter(item, index, array) -> bool:
    """
    Decide whether README generation should run for notebook *item*.

    A notebook opts out by setting metadata key "no_readme_generation" to
    "true". Returns False (skip) for *test.ipynb files and unreadable files.
    """
    if item.endswith("test.ipynb"):
        return False
    try:
        # read in notebook
        with open(item, "r", encoding="utf-8") as f:
            data = json.load(f)
        flag = data.get("metadata", {}).get("no_readme_generation")
    except Exception:
        return False  # unreadable or invalid notebook: skip generation
    # Fix: the original fell through to an implicit None (falsy) when the
    # flag key was present but null, despite the declared bool return;
    # a null/absent flag now means "generate".
    return flag != "true"
4,591 | import json
from pathlib import Path
import workflow_generator
import readme_generator
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from operator import itemgetter
import argparse
import sys
import os
import re
BRANCH = "main"
def get_notebook_readme_description(notebook) -> str:
def get_readme_description_first_sentence(readme) -> str:
class ReadmeStepsManage:
def git_base_dir() -> str:
def write_workflow(
workflow_name: str, pipeline_name: str, output_telemetry=Telemetry()
) -> None:
def write_readme(workflow_telemetries, readme_telemetries):
    """
    Render examples/README.md from the collected workflow and readme
    telemetry, grouped into the catalog sections of the template.

    :param workflow_telemetries: Telemetry objects from notebook workflows.
    :param readme_telemetries: Telemetry objects from readme workflows.
    """
    global BRANCH  # read-only; kept from the original
    ReadmeStepsManage.git_base_dir()  # warm the cached repo base dir
    readme_file = Path(ReadmeStepsManage.git_base_dir()) / "examples/README.md"

    quickstarts = {"readmes": [], "notebooks": []}
    tutorials = {"readmes": [], "notebooks": []}
    flows = {"readmes": [], "notebooks": []}
    evaluations = {"readmes": [], "notebooks": []}
    chats = {"readmes": [], "notebooks": []}
    toolusecases = {"readmes": [], "notebooks": []}
    connections = {"readmes": [], "notebooks": []}

    def _make_entry(name, path, pipeline_name, yaml_name, description):
        # One row of a rendered README table. Extracted: the original
        # repeated this dict literal fourteen times.
        return {
            "name": name,
            "path": path,
            "pipeline_name": pipeline_name,
            "yaml_name": yaml_name,
            "description": description,
        }

    def _bucket_for(folder, name):
        # Map a working dir / readme folder to its catalog dict, or None
        # for unknown locations.
        if folder.startswith("examples/flows/standard"):
            return flows
        if folder.startswith("examples/connections"):
            return connections
        if folder.startswith("examples/flows/evaluation"):
            return evaluations
        if folder.startswith("examples/tutorials"):
            return quickstarts if "quickstart" in name else tutorials
        if folder.startswith("examples/flows/chat"):
            return chats
        if folder.startswith("examples/tools/use-cases"):
            return toolusecases
        return None

    for workflow_telemetry in workflow_telemetries:
        notebook_name = f"{workflow_telemetry.name}.ipynb"
        gh_working_dir = workflow_telemetry.gh_working_dir
        pipeline_name = workflow_telemetry.workflow_name
        yaml_name = f"{pipeline_name}.yml"
        # For workflows, open ipynb as raw json and
        # setup description at .metadata.description
        description = get_notebook_readme_description(workflow_telemetry.notebook)
        notebook_path = gh_working_dir.replace("examples/", "") + f"/{notebook_name}"
        bucket = _bucket_for(gh_working_dir, notebook_name)
        if bucket is None:
            print(f"Unknown workflow type: {gh_working_dir}")
        else:
            bucket["notebooks"].append(
                _make_entry(
                    notebook_name, notebook_path, pipeline_name, yaml_name, description
                )
            )

    # Adjust tutorial names:
    for readme_telemetry in readme_telemetries:
        if readme_telemetry.readme_name.endswith("README.md"):
            notebook_name = readme_telemetry.readme_folder.split("/")[-1]
        else:
            notebook_name = readme_telemetry.readme_name.split("/")[-1].replace(
                ".md", ""
            )
        notebook_path = readme_telemetry.readme_name.replace("examples/", "")
        pipeline_name = readme_telemetry.workflow_name
        yaml_name = f"{readme_telemetry.workflow_name}.yml"
        description = get_readme_description_first_sentence(
            readme_telemetry.readme_name
        )
        readme_folder = readme_telemetry.readme_folder
        bucket = _bucket_for(readme_folder, notebook_name)
        if bucket is None:
            print(f"Unknown workflow type: {readme_folder}")
        else:
            bucket["readmes"].append(
                _make_entry(
                    notebook_name, notebook_path, pipeline_name, yaml_name, description
                )
            )

    quickstarts["notebooks"] = sorted(
        quickstarts["notebooks"],
        key=itemgetter("name"),
        reverse=True,
    )
    replacement = {
        "branch": BRANCH,
        "tutorials": tutorials,
        "flows": flows,
        "evaluations": evaluations,
        "chats": chats,
        "toolusecases": toolusecases,
        "connections": connections,
        "quickstarts": quickstarts,
    }
    print("writing README.md...")
    env = Environment(
        loader=FileSystemLoader(
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts/readme/ghactions_driver/readme_templates"
        )
    )
    template = env.get_template("README.md.jinja2")
    with open(readme_file, "w") as f:
        f.write(template.render(replacement))
    print("finished writing README.md")
4,592 | import argparse
from pathlib import Path
from functools import reduce
from ghactions_driver.readme_workflow_generate import write_readme_workflow
from ghactions_driver.readme_step import ReadmeStepsManage, ReadmeSteps
from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.telemetry_obj import Telemetry
def local_filter(callback, array: list[Path]) -> list[Path]:
    """
    JS-style filter: keep items where callback(item, index, array) is truthy.

    Note: the original annotation was `[Path]` — a list literal, not a type;
    corrected to `list[Path]`.
    """
    return [
        item
        for index, item in enumerate(array)
        if callback(item, index, array)
    ]
4,593 | import argparse
from pathlib import Path
from functools import reduce
from ghactions_driver.readme_workflow_generate import write_readme_workflow
from ghactions_driver.readme_step import ReadmeStepsManage, ReadmeSteps
from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.telemetry_obj import Telemetry
class ReadmeStepsManage:
    """
    Static methods for managing all readme steps.
    """

    # Cached repo root path; "" means "not resolved yet".
    repo_base_dir = ""

    @staticmethod
    def git_base_dir() -> str:
        """
        Get the base directory of the git repo (cached after the first call).
        """
        if ReadmeStepsManage.repo_base_dir == "":
            try:
                ReadmeStepsManage.repo_base_dir = (
                    subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
                    .decode("utf-8")
                    .strip()
                )
                # NOTE(review): this raise is unconditional, so the except
                # branch always runs and the root is derived from __file__
                # even inside a git checkout — confirm this is intended.
                raise Exception("Not in git repo")
            except Exception:
                ReadmeStepsManage.repo_base_dir = (
                    Path(__file__).parent.parent.parent.parent.resolve()
                )
            print(ReadmeStepsManage.repo_base_dir)
        return ReadmeStepsManage.repo_base_dir

    @staticmethod
    def write_workflow(
        workflow_name: str, pipeline_name: str, output_telemetry=None
    ) -> None:
        """
        Render the GitHub Actions workflow for *workflow_name* and record the
        results on *output_telemetry*.

        :param workflow_name: target workflow (also the output yml stem).
        :param pipeline_name: CI name substituted into the template.
        :param output_telemetry: telemetry sink; a fresh Telemetry is created
            per call (the original used one shared mutable default instance).
        """
        if output_telemetry is None:
            output_telemetry = Telemetry()
        # Schedule notebooks at different times to reduce maximum quota usage.
        name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
        schedule_minute = name_hash % 60
        schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC
        if "tutorials" in workflow_name:
            # markdown filename has some exceptions, special handle here
            if "chat_with_pdf" in workflow_name:
                readme_name = "chat-with-pdf.md"
            elif (
                "fine_tuning_evaluation_promptflow_quality_improvement"
                in workflow_name
            ):
                readme_name = "promptflow-quality-improvement.md"
            else:
                readme_name = "README.md"
            readme_path = (
                Path(ReadmeStepsManage.git_base_dir())
                / ReadmeSteps.working_dir
                / readme_name
            )
            # local import to avoid circular import
            from .resource_resolver import resolve_tutorial_resource

            path_filter = resolve_tutorial_resource(
                workflow_name, readme_path.resolve()
            )
        else:
            if (
                "flow_with_additional_includes" in workflow_name
                or "flow_with_symlinks" in workflow_name
            ):
                # these two flows have dependencies on flow web-classification
                # so their workflows also listen to changes in web-classification
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + "examples/flows/standard/web-classification/**, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
            else:
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
        replacements = {
            "steps": ReadmeSteps.step_array,
            "workflow_name": workflow_name,
            "ci_name": pipeline_name,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
        }
        workflow_template_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts"
            / "readme"
            / "ghactions_driver"
            / "workflow_templates"
        )
        target_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / ".github"
            / "workflows"
            / f"{workflow_name}.yml"
        )
        template = Environment(
            loader=FileSystemLoader(workflow_template_path.resolve())
        ).get_template(ReadmeSteps.template)
        content = template.render(replacements)
        with open(target_path.resolve(), "w", encoding="utf-8") as f:
            f.write(content)
        print(f"Write readme workflow: {target_path.resolve()}")
        output_telemetry.workflow_name = workflow_name
        output_telemetry.target_path = target_path
        output_telemetry.readme_folder = ReadmeSteps.working_dir
        output_telemetry.readme_name = ReadmeSteps.readme_name
        output_telemetry.path_filter = path_filter
# Parse a readme file into the runnable text collected by the pandoc filter.
# NOTE(review): depends on pypandoc, io, panflute and the action/prepare
# filter callbacks, none of which are imported in this snippet — confirm
# they exist in the real module. Also shadows the readme_parser imported
# above from ghactions_driver.readme_parse.
def readme_parser(filename: str):
# filename is relative to the repo root resolved via ReadmeStepsManage.
real_filename = Path(ReadmeStepsManage.git_base_dir()) / filename
# Convert the markdown file to a pandoc JSON AST string.
data = pypandoc.convert_file(str(real_filename), "json")
f = io.StringIO(data)
doc = panflute.load(f)
# Run the module's filters over the AST; they accumulate doc.full_text.
panflute.run_filter(action, prepare, doc=doc)
return doc.full_text
The provided code snippet includes necessary dependencies for implementing the `no_readme_generation_filter` function. Write a Python function `def no_readme_generation_filter(item: Path, index, array) -> bool` to solve the following problem:
If there is no steps in the readme, then no generation
Here is the function:
def no_readme_generation_filter(item: Path, index, array) -> bool:
    """
    Filter predicate (item, index, array): return True when *item* is a
    readme whose parsed text contains steps, i.e. a workflow should be
    generated for it. Readmes under a ``build`` folder, readmes that parse
    to empty text, and readmes that fail to parse are all excluded.
    """
    try:
        if 'build' in str(item):  # skip build folder
            return False
        full_text = readme_parser(item.relative_to(ReadmeStepsManage.git_base_dir()))
        if full_text == "":
            return False
        else:
            return True
    except Exception as error:
        # A parse failure is treated as "nothing to generate" rather than
        # aborting the whole run.
        print(error)
        return False  # do not generate a workflow for this readme
4,594 | from promptflow._sdk._load_functions import load_yaml
from promptflow._sdk._pf_client import PFClient
from ghactions_driver.readme_step import ReadmeStepsManage
from pathlib import Path
import os
import subprocess
import sys
def install(filename):
    """Install the requirements listed in *filename* into the current interpreter."""
    pip_command = [sys.executable, "-m", "pip", "install", "-r", filename]
    subprocess.check_call(pip_command)
4,595 | import os
import sys
import argparse
from lib.option import initOption
from config.config import pyVersion, urlVersion
from config.data import logger
pyVersion = platform.python_version()
urlVersion = urllib3.__version__
logger = MY_LOGGER
def version_check():
    """Abort when the running Python / urllib3 versions are incompatible.

    Versions are compared numerically (component by component) instead of
    lexicographically: a plain string comparison wrongly treats e.g.
    Python "3.10.0" as older than "3.7.3" and urllib3 "1.9" as newer
    than "1.25.8".
    """
    def _as_tuple(version):
        # "1.25.8" -> (1, 25, 8); stop at the first non-numeric component.
        parts = []
        for piece in version.split("."):
            digits = "".join(ch for ch in piece if ch.isdigit())
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts)

    if _as_tuple(pyVersion) < (3, 7, 3):
        logger.error(
            "此Python版本 ('{0}') 不兼容,成功运行Glass你必须使用版本 >= 3.7.3 (访问 ‘https://www.python.org/downloads/’)".format(pyVersion))
        exit(0)
    if _as_tuple(urlVersion) > (1, 25, 8) and _as_tuple(pyVersion) > (3, 8):
        logger.error("urllib3库版本 ('{0}') 不兼容,代理容易出错".format(urlVersion))
        logger.info('运行 (python3 -m pip install -U "urllib3==1.25.8") 进行库降低版本')
        logger.info(
            "或者运行 (python3 -m pip install -r requirements.txt) 进行全部库的安装")
        exit(0)
4,596 | import os
import sys
import argparse
from lib.option import initOption
from config.config import pyVersion, urlVersion
from config.data import logger
import sys
sys.dont_write_bytecode = True
The provided code snippet includes necessary dependencies for implementing the `modulePath` function. Write a Python function `def modulePath()` to solve the following problem:
This will get us the program's directory, even if we are frozen using py2exe
Here is the function:
def modulePath():
    """
    Return the program's directory, even when frozen (e.g. by py2exe,
    where ``__file__`` may be unavailable).
    """
    try:
        _ = sys.executable if hasattr(sys, "frozen") else __file__
    except NameError:
        # __file__ can be missing in frozen/interactive environments; fall
        # back to introspection. Imported locally so this fallback cannot
        # itself raise NameError when the module lacks a top-level
        # `import inspect`.
        import inspect
        _ = inspect.getsourcefile(modulePath)
    return os.path.dirname(os.path.realpath(_))
4,597 | import os
import sys
import random
import prettytable as pt
from mod.fofa import fmain
from mod.zoomeye import zmain
from mod.quake import qmain
from mod.website import mwebs
from mod.rulesCli import ruleMain
from mod.output import outMain
from lib.proxy import checkProxyFile
from lib.update import update
from lib.common import getScheme
from colorama import init as wininit
from config.config import Version, tosayRun, Banner, fofaApi, zoomeyeApi
from config.data import Urls, Paths, WebInfos, OutInfos, Proxys, confs, logger
def add_options(cmdparse):
    """Copy parsed command-line options into the global ``confs`` store.

    Accepts either a mapping (anything exposing ``.items()``) or an
    argparse-style namespace (read through ``__dict__``).
    """
    if hasattr(cmdparse, "items"):
        option_pairs = cmdparse.items()
    else:
        option_pairs = cmdparse.__dict__.items()
    for option, value in option_pairs:
        confs[option] = value
def set_path(root):
    # Populate the global Paths object with all locations derived from the
    # project root: output dir, config dir/file, and the local proxy file.
    Paths.root = root
    Paths.output = os.path.join(root, 'output')
    Paths.config = os.path.join(root, 'config')
    Paths.config_py = os.path.join(Paths.config, 'config.py')
    Paths.proxyFile = os.path.join(root, 'proxyFile')
def program_start(usage):
    """Print the banner (and the optional daily quote), then show usage and
    exit when the program was launched with no arguments at all."""
    print(random.choice(Banner))
    if tosayRun:
        from config.tosay import todaySay
        # NOTE(review): todaySay() is called twice (once to test, once to
        # print) — confirm it is cheap and idempotent.
        if todaySay():
            print(todaySay())
        else:
            pass
    if len(sys.argv) == 1:
        print(usage)
        exit(0)
def confs_init():
    # Seed every supported option with its "unset" default before the
    # parsed command line is merged in by add_options().
    confs.version = False
    confs.url = None
    confs.file = None
    confs.ip = None
    confs.web = None
    confs.proxy = None
    confs.proxylist = None
    confs.updateprogram = False
    confs.outputTarget = None
    confs.search = None
def set_confs():
    """Validate the parsed options in ``confs`` and load them into the
    global state (Urls, Proxys). Exits the process on invalid input."""
    if confs.updateprogram:
        update()
    if confs.version:
        logger.info("Version: {0}".format(Version))
        exit(0)
    if confs.search:
        # Only the three supported search engines are accepted.
        searchType = ["fofa", "eye", "qk"]
        if confs.search in set(searchType):
            pass
        else:
            logger.error("参数错误,e.g.(-s fofa or -s eye or -s qk)")
            exit(0)
    if confs.outputTarget:
        outTypes = ["txt", "json", "html", "xls", "csv"]
        if confs.outputTarget in set(outTypes):
            pass
        else:
            logger.error("输出格式错误,只支持输出格式为:{0}".format(outTypes))
            exit(0)
    if confs.ip:
        Urls.ips.append(confs.ip)
    if confs.url:
        # Default to http:// when no scheme was supplied.
        if not confs.url.startswith('http'):
            confs.url = "http://" + confs.url
        Urls.url.append(confs.url)
    if confs.file:
        # One target per line; single-character lines (blank lines) are skipped.
        with open(confs.file, 'r') as f:
            for ip in f.readlines():
                if len(ip) != 1:
                    Urls.ips.append(ip.strip())
    if confs.web:
        with open(confs.web, 'r') as f:
            for web in f.readlines():
                if len(web) != 1:
                    if not web.startswith('http'):
                        web = "http://" + web
                    Urls.url.append(web.strip())
    if isinstance(confs["proxy"], str):
        if ":" in confs["proxy"]:
            # A custom proxy like "http://127.0.0.1:8080" splits into
            # (scheme, //host, port); anything with a non-numeric port is rejected.
            splits = confs["proxy"].split(":")
            try:
                if int(splits[2]):
                    confs["proxy"] = {splits[0]: "{0}:{1}:{2}".format(
                        splits[0], splits[1], splits[2])}
                    Proxys.proxyList.append(confs["proxy"])
            except ValueError:
                logger.error(
                    "代理地址错误,例如:http://127.0.0.1:8080 or https://127.0.0.1:8080")
                exit(0)
        elif confs["proxy"] != "all" and confs["proxy"] != "cn":
            logger.error(
                "参数错误,all表示加载全部IP,cn加载国内IP,自定义例子为:http://127.0.0.1:8080 or https://127.0.0.1:8080")
            exit(0)
        else:
            # "all"/"cn" load proxies from the local proxy file.
            checkProxyFile(confs["proxy"])
            if len(Proxys.proxyList) == 0:
                logger.error("本地获取代理失败,请从新获取")
                message = input("是否不使用代理访问?[y/N]")
                if message != "y":
                    exit(0)
            else:
                logger.info("分配IP中")
                getScheme()
    if confs.proxylist:
        if confs.proxylist == "all" or confs.proxylist == "cn":
            checkProxyFile(confs.proxylist)
            if len(Proxys.proxyList) == 0:
                logger.error("本地获取代理失败,请重新获取")
                exit(0)
            else:
                # Pretty-print the loaded proxies as a Protocol/Host table.
                tb = pt.PrettyTable()
                tb.field_names = ['Protocol', 'Host']
                for p in Proxys.proxyList:
                    logger.info(p)
                    for i in p:
                        tb.add_row([i, p[i]])
                print(tb)
                logger.info("协议可切换,一般在代理插件里设置http协议,这样避免证书问题")
        else:
            exit(0)
def runmod():
    """Dispatch the configured actions: search-engine lookups for IPs,
    web fingerprinting for URLs, and result output."""
    if Urls.ips:
        if confs.search:
            if confs.search == "fofa":
                logger.info("调用Fofa接口中")
                fmain(Urls.ips)
            if confs.search == "eye":
                logger.info("调用Zoomeye接口中")
                zmain(Urls.ips)
            if confs.search == "qk":
                logger.info("调用Quake接口中")
                qmain(Urls.ips)
        else:
            logger.error("参数错误,e.g.(-s fofa or -s eye or -s qk)")
            exit(0)
    if Urls.url:
        # Fetch the sites, then run the fingerprint rules on what was collected.
        mwebs()
        if WebInfos:
            ruleMain()
        else:
            logger.info("获取信息失败")
    if OutInfos:
        # Default output format is txt when none was requested.
        if confs.outputTarget:
            outMain(confs.outputTarget)
        else:
            outMain("txt")
def datas_init():
    """Reset the global runtime containers to an empty state."""
    Urls.url = []
    Urls.ips = []
    Urls.scheme = []
    Proxys.proxyList = []
    Proxys.scheme = []
    # NOTE(review): the next two assignments rebind *local* names only and
    # do not reset the imported WebInfos/OutInfos globals — likely a bug;
    # clearing them in place (or declaring them `global`) is probably what
    # was intended. Confirm before changing.
    WebInfos = {}
    OutInfos = {}
def initOption(usage, root, args):
    """Top-level entry point: initialize globals, record paths, merge the
    parsed command line, validate it, and run the selected modules."""
    wininit(autoreset=True)  # colorama: enable ANSI colors on Windows
    datas_init()
    set_path(root)
    program_start(usage)
    confs_init()
    add_options(args)
    set_confs()
    runmod()
4,598 | import os
from setuptools import setup, find_packages, dist
import importlib
from pkg_resources import parse_version
import subprocess
import warnings
import os
import sys
import logging
import glob
import numpy
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
cwd = os.path.dirname(os.path.abspath(__file__))
with open(version_txt) as f:
version = f.readline().strip()
def write_version_file():
    """Write the package version into kaolin/version.py."""
    target = os.path.join(cwd, 'kaolin', 'version.py')
    with open(target, 'w') as out:
        out.write("__version__ = '{}'\n".format(version))
4,599 | import os
from setuptools import setup, find_packages, dist
import importlib
from pkg_resources import parse_version
import subprocess
import warnings
import os
import sys
import logging
import glob
import numpy
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
cwd = os.path.dirname(os.path.abspath(__file__))
with open(version_txt) as f:
version = f.readline().strip()
def get_requirements():
    """Collect package requirements from the tools/*.txt files.

    Returns:
        list of str: requirement specifiers, one per line.

    On Python >= 3.10 the visualization requirements (which pin usd-core)
    are skipped — matching the warning emitted below, which the original
    code contradicted by reading viz_requirements.txt unconditionally.
    """
    requirements = []
    if sys.version_info >= (3, 10):
        warnings.warn("usd-core is not compatible with python_version >= 3.10 "
                      "and won't be installed, please use supported python_version "
                      "to use USD related features")
    else:
        # Only read the viz requirements when they can actually be installed.
        with open(os.path.join(cwd, 'tools', 'viz_requirements.txt'), 'r') as f:
            for line in f.readlines():
                requirements.append(line.strip())
    with open(os.path.join(cwd, 'tools', 'requirements.txt'), 'r') as f:
        for line in f.readlines():
            requirements.append(line.strip())
    return requirements
4,600 | import os
from setuptools import setup, find_packages, dist
import importlib
from pkg_resources import parse_version
import subprocess
import warnings
import os
import sys
import logging
import glob
import numpy
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
def get_scripts():
    """Entry-point scripts shipped with the package."""
    scripts = ['kaolin/experimental/dash3d/kaolin-dash3d']
    return scripts
4,601 | import os
from setuptools import setup, find_packages, dist
import importlib
from pkg_resources import parse_version
import subprocess
import warnings
import os
import sys
import logging
import glob
import numpy
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
# Cross-compilation support: when no GPU is visible at build time, either
# select a default TORCH_CUDA_ARCH_LIST (FORCE_CUDA=1) or fall back to a
# CPU-only build.
if not torch.cuda.is_available():
    if os.getenv('FORCE_CUDA', '0') == '1':
        # From: https://github.com/NVIDIA/apex/blob/c4e85f7bf144cb0e368da96d339a6cbd9882cea5/setup.py
        # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
        # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
        logging.warning(
            "Torch did not find available GPUs on this system.\n"
            "If your intention is to cross-compile, this is not an error.\n"
            "By default, Kaolin will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
            "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
            "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0),\n"
            "and, if the CUDA version is >= 11.8, Hopper (compute capability 9.0).\n"
            "If you wish to cross-compile for a single specific architecture,\n"
            'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
        )
        if os.getenv("TORCH_CUDA_ARCH_LIST", None) is None:
            # Pick the default arch list that matches what the installed CUDA toolkit can target.
            _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
            if int(bare_metal_major) == 11:
                if int(bare_metal_minor) == 0:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
                elif int(bare_metal_minor) < 8:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
                else:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
            elif int(bare_metal_major) == 12:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
            else:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
            print(f'TORCH_CUDA_ARCH_LIST: {os.environ["TORCH_CUDA_ARCH_LIST"]}')
    else:
        logging.warning(
            "Torch did not find available GPUs on this system.\n"
            "Kaolin will install only with CPU support and will have very limited features.\n"
            "If your wish to cross-compile for GPU `export FORCE_CUDA=1` before running setup.py."
        )
def get_include_dirs():
    """Extra include directories for the CUDA build (CUB headers when needed)."""
    include_dirs = []
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
        if "CUB_HOME" in os.environ:
            logging.warning(f'Including CUB_HOME ({os.environ["CUB_HOME"]}).')
            include_dirs.append(os.environ["CUB_HOME"])
        else:
            # CUDA < 11 does not bundle CUB, so fall back to the vendored copy.
            if int(bare_metal_major) < 11:
                logging.warning(f'Including default CUB_HOME ({os.path.join(cwd, "third_party/cub")}).')
                include_dirs.append(os.path.join(cwd, 'third_party/cub'))
    return include_dirs
def get_extensions():
    """Build the list of C++/CUDA and Cython extension modules for setup().

    Returns the main `kaolin._C` extension (CUDA-enabled when a GPU is
    visible or FORCE_CUDA=1) plus the cythonized ops extensions.
    """
    extra_compile_args = {'cxx': ['-O3']}
    define_macros = []
    include_dirs = []
    sources = glob.glob('kaolin/csrc/**/*.cpp', recursive=True)
    # FORCE_CUDA is for cross-compilation in docker build
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        with_cuda = True
        define_macros += [("WITH_CUDA", None), ("THRUST_IGNORE_CUB_VERSION_CHECK", None)]
        sources += glob.glob('kaolin/csrc/**/*.cu', recursive=True)
        extension = CUDAExtension
        extra_compile_args.update({'nvcc': [
            '-O3',
            '-DWITH_CUDA',
            '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
        ]})
        include_dirs = get_include_dirs()
    else:
        extension = CppExtension
        with_cuda = False
    extensions = []
    extensions.append(
        extension(
            name='kaolin._C',
            sources=sources,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            include_dirs=include_dirs
        )
    )
    # use cudart_static instead (avoid a runtime dependency on the shared cudart)
    for extension in extensions:
        extension.libraries = ['cudart_static' if x == 'cudart' else x
                               for x in extension.libraries]
    use_cython = True
    ext = '.pyx' if use_cython else '.cpp'
    cython_extensions = [
        CppExtension(
            'kaolin.ops.mesh.triangle_hash',
            sources=[
                f'kaolin/cython/ops/mesh/triangle_hash{ext}'
            ],
            include_dirs=[numpy.get_include()],
        ),
        CppExtension(
            'kaolin.ops.conversions.mise',
            sources=[
                f'kaolin/cython/ops/conversions/mise{ext}'
            ],
        ),
    ]
    if use_cython:
        from Cython.Build import cythonize
        from Cython.Compiler import Options
        compiler_directives = Options.get_directive_defaults()
        compiler_directives["emit_code_comments"] = False
        cython_extensions = cythonize(cython_extensions, language='c++',
                                      compiler_directives=compiler_directives)
    return extensions + cython_extensions
4,602 | import os
def run_apidoc(_):
def setup(app):
app.connect("builder-inited", run_apidoc) | null |
4,603 | import argparse
import os
import torch
import kaolin as kal
The provided code snippet includes the necessary dependencies for implementing the `preprocessing_transform` function. Write a Python function `def preprocessing_transform(inputs)` to solve the following problem:
This is the transform used in the ShapeNet dataset's __getitem__. Three tasks are done: 1) get the area of each face, so it can be used to sample points; 2) get a proper list of RGB diffuse maps; 3) get the material associated with each face.
Here is the function:
def preprocessing_transform(inputs):
    """Transform used in the ShapeNet dataset's ``__getitem__``.

    Three tasks are done:
      1) compute the area of each face, so it can be used to sample points;
      2) build a proper list of RGB diffuse maps;
      3) record the material associated with each face.
    """
    mesh = inputs['mesh']
    vertices = mesh.vertices.unsqueeze(0)
    faces = mesh.faces
    # Some materials don't contain an RGB texture map, so we are considering the single value
    # to be a single pixel texture map (1, 3, 1, 1)
    # we apply a modulo 1 on the UVs because ShapeNet follows GL_REPEAT behavior (see: https://open.gl/textures)
    uvs = torch.nn.functional.pad(mesh.uvs.unsqueeze(0) % 1, (0, 0, 0, 1)) * 2. - 1.
    uvs[:, :, 1] = -uvs[:, :, 1]
    face_uvs_idx = mesh.face_uvs_idx
    face_material_idx = mesh.material_assignments
    materials = [m['map_Kd'].permute(2, 0, 1).unsqueeze(0).float() / 255. if 'map_Kd' in m else
                 m['Kd'].reshape(1, 3, 1, 1)
                 for m in mesh.materials]
    # Faces with no UVs are flagged with -1; remap them to index 0 and zero
    # out their face_uvs below.
    mask = face_uvs_idx == -1
    face_uvs_idx[mask] = 0
    face_uvs = kal.ops.mesh.index_vertices_by_faces(
        uvs, face_uvs_idx
    )
    face_uvs[:, mask] = 0.
    outputs = {
        'vertices': vertices,
        'faces': faces,
        'face_areas': kal.ops.mesh.face_areas(vertices, faces),
        'face_uvs': face_uvs,
        'materials': materials,
        'face_material_idx': face_material_idx,
        'name': inputs['name']
    }
    return outputs
4,604 | import torch
import numpy as np
from typing import Tuple
from kaolin.render.camera import Camera, CameraFOV
def generate_pixel_grid(res_x=None, res_y=None, device='cuda'):
    """Build a grid of pixel-center coordinates.

    Returns ``(pixel_y, pixel_x)``: two tensors of shape ``(res_x, res_y)``
    holding the row / column coordinates of each pixel center (integer
    index + 0.5).
    """
    rows = torch.arange(res_x, device=device)
    cols = torch.arange(res_y, device=device)
    grid_y, grid_x = torch.meshgrid(rows, cols)
    return grid_y + 0.5, grid_x + 0.5
4,605 | import torch
import numpy as np
from typing import Tuple
from kaolin.render.camera import Camera, CameraFOV
ray_orig, ray_dir, near, far = generate_perspective_rays(camera, pixel_grid)
def generate_perspective_rays(camera: Camera, pixel_grid: Tuple[torch.Tensor, torch.Tensor]):
    """Generate world-space perspective rays through the given pixel centers.

    Returns ``(ray_orig, ray_dir, near, far)`` for a single camera;
    ``ray_dir`` is normalized and both ray tensors are flattened to
    ``(res_y * res_x, 3)``.
    """
    # coords_grid should remain immutable (a new tensor is implicitly created here)
    pixel_y, pixel_x = pixel_grid
    pixel_x = pixel_x.to(camera.device, camera.dtype)
    pixel_y = pixel_y.to(camera.device, camera.dtype)
    # Account for principal point offset from canvas center
    # NOTE(review): x is shifted by -x0 while y is shifted by +y0 —
    # presumably due to the flipped image y-axis; confirm against the
    # Camera convention.
    pixel_x = pixel_x - camera.x0
    pixel_y = pixel_y + camera.y0
    # pixel values are now in range [-1, 1], both tensors are of shape res_y x res_x
    pixel_x = 2 * (pixel_x / camera.width) - 1.0
    pixel_y = 2 * (pixel_y / camera.height) - 1.0
    # Camera-space directions: scale by the tangents of the half field of
    # view and look down -Z.
    ray_dir = torch.stack((pixel_x * camera.tan_half_fov(CameraFOV.HORIZONTAL),
                           -pixel_y * camera.tan_half_fov(CameraFOV.VERTICAL),
                           -torch.ones_like(pixel_x)), dim=-1)
    ray_dir = ray_dir.reshape(-1, 3)    # Flatten grid rays to 1D array
    ray_orig = torch.zeros_like(ray_dir)
    # Transform from camera to world coordinates
    ray_orig, ray_dir = camera.extrinsics.inv_transform_rays(ray_orig, ray_dir)
    ray_dir /= torch.linalg.norm(ray_dir, dim=-1, keepdim=True)
    ray_orig, ray_dir = ray_orig[0], ray_dir[0]  # Assume a single camera
    return ray_orig, ray_dir, camera.near, camera.far
4,606 | import torch
import kaolin
spc = kaolin.rep.Spc.make_dense(level, device='cuda')
print(f'Input SPC features: {colors.shape}')
color_hierarchy = encode(colors=colors,
octree=spc.octrees,
point_hierachy=spc.point_hierarchies,
pyramids=spc.pyramids,
exsum=spc.exsum,
level=level)
print(f'Final encoded value (average of averages):')
print(color_hierarchy[0])
def encode(colors, octree, point_hierachy, pyramids, exsum, level):
    """Average-pool SPC leaf colors bottom-up through every level.

    Returns the full per-level color hierarchy (RGBA, normalized), with
    the input ``colors`` at the deepest level and averaged colors at each
    coarser level.
    """
    # SPC convolutions are characterized by a set of 'kernel vectors' and corresponding 'weights'.
    # kernel_vectors is the "kernel support" -
    # a listing of 3D coordinates where the weights of the convolution are non-null,
    # in this case a it's a simple dense 2x2x2 grid.
    kernel_vectors = torch.tensor([[0,0,0],[0,0,1],[0,1,0],[0,1,1],
                                   [1,0,0],[1,0,1],[1,1,0],[1,1,1]],
                                  dtype=torch.short, device='cuda')
    # The weights specify how the input colors 'under' the kernel are mapped to an output color,
    # in this case a simple average.
    weights = torch.diag(torch.tensor([0.125, 0.125, 0.125, 0.125],
                                      dtype=torch.float32, device='cuda'))  # Tensor of (4, 4)
    weights = weights.repeat(8,1,1).contiguous()   # Tensor of (8, 4, 4)
    # Storage for the output color hierarchy is allocated. This includes points at the bottom of the hierarchy,
    # as well as intermediate SPC levels (which may store different features)
    color_hierarchy = torch.empty((pyramids[0,1,level+1],4), dtype=torch.float32, device='cuda')
    # Copy the input colors into the highest level of color_hierarchy. pyramids is used here to select all leaf
    # points at the bottom of the hierarchy and set them to some pre-sampled random color. Points at intermediate
    # levels are left empty.
    color_hierarchy[pyramids[0,1,level]:pyramids[0,1,level+1]] = colors[:]
    # Performs the 3d convolutions in a bottom up fashion to 'filter' colors from the previous level
    for l in range(level,0,-1):
        # Apply the 3d convolution. Note that jump=1 means the inputs and outputs differ by 1 level
        # This is analogous to to a stride=2 in grid based convolutions
        colors, ll = kaolin.ops.spc.conv3d(octree,
                                           point_hierachy,
                                           l,
                                           pyramids,
                                           exsum,
                                           colors,
                                           weights,
                                           kernel_vectors,
                                           jump=1)
        # Copy the output colors into the color hierarchy
        color_hierarchy[pyramids[0,1,ll]:pyramids[0,1,l]] = colors[:]
        print(f"At level {l}, output feature shape is:\n{colors.shape}")
    # Normalize the colors.
    color_hierarchy /= color_hierarchy[:,3:]
    # Normalization is needed here due to the sparse nature of SPCs. When a point under a kernel is not
    # present in the point hierarchy, the corresponding data is treated as zeros. Normalization is equivalent
    # to having the filter weights sum to one. This may not always be desirable, e.g. alpha blending.
    return color_hierarchy
4,607 | import argparse
import logging
import os
import random
import torch
import sys
import kaolin
The provided code snippet includes necessary dependencies for implementing the `__normalize_vertices` function. Write a Python function `def __normalize_vertices(vertices)` to solve the following problem:
Normalizes vertices to fit an [-1...1] bounding box, common during training, but not necessary for visualization.
Here is the function:
def __normalize_vertices(vertices):
    """
    Normalizes vertices to fit a [-1...1] bounding box,
    common during training, but not necessary for visualization.
    """
    # Use the function's own argument: the original body referenced an
    # outer `res` object (`res.vertices`) and ignored `vertices` entirely,
    # which raises NameError / normalizes the wrong mesh.
    return kaolin.ops.pointcloud.center_points(vertices.unsqueeze(0), normalize=True).squeeze(0) * 2
4,608 | import os
import argparse
from kaolin.io import usd
from kaolin.io.utils import mesh_handler_naive_triangulate
def mesh_handler_naive_triangulate(vertices, face_vertex_counts, *features, face_assignments=None):
def import_kitchen_set(kitchen_set_usd):
    """Import every complete object in the Kitchen Set USD scene as a
    homogeneous triangle mesh."""
    # The Kitchen Set example organizes assets in a particular way. Since we want to import complete objects and
    # not each separate part of an object, we'll find all the paths that are named :code:`Geom`:
    scene_paths = usd.get_scene_paths(kitchen_set_usd, r'.*/Geom$')
    # The meshes in this dataset have a heterogeneous topology, meaning the number of vertices
    # for each polygon varies. To deal with those, we'll pass in a handler function that will
    # homogenize those meshes to homogeneous triangle meshes.
    usd_meshes = usd.import_meshes(
        kitchen_set_usd,
        scene_paths=scene_paths,
        heterogeneous_mesh_handler=mesh_handler_naive_triangulate
    )
    return usd_meshes
4,609 | import os
import argparse
from kaolin.io import usd
from kaolin.io.utils import mesh_handler_naive_triangulate
def save_kitchen_set_dataset(meshes, out_dir):
    """Export each mesh to its own USD file under *out_dir*, converted to Y-up."""
    for index, mesh in enumerate(meshes):
        target = os.path.join(out_dir, f'mesh_{index}.usd')
        # Swap the Y and Z vertex axes so the exported models are Y-up.
        usd.export_mesh(
            file_path=target,
            vertices=mesh.vertices[..., [0, 2, 1]],
            faces=mesh.faces
        )
4,610 | import torch
from tqdm import tqdm
class Embedder:
    """Positional encoding: maps x to [x, sin(f1*x), cos(f1*x), sin(f2*x), ...].

    Configured entirely through keyword arguments: ``input_dims``,
    ``include_input``, ``max_freq_log2``, ``num_freqs``, ``log_sampling``
    and ``periodic_fns``.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Precompute the per-frequency embedding callables and out_dim."""
        dims = self.kwargs['input_dims']
        fns = []
        total_dim = 0
        if self.kwargs['include_input']:
            # Pass the raw input through unchanged.
            fns.append(lambda x: x)
            total_dim += dims
        top_freq = self.kwargs['max_freq_log2']
        n_freqs = self.kwargs['num_freqs']
        if self.kwargs['log_sampling']:
            bands = 2. ** torch.linspace(0., top_freq, steps=n_freqs)
        else:
            bands = torch.linspace(2. ** 0., 2. ** top_freq, steps=n_freqs)
        for band in bands:
            for periodic in self.kwargs['periodic_fns']:
                # Bind band/periodic as defaults to avoid late-binding bugs.
                fns.append(lambda x, periodic=periodic, band=band: periodic(x * band))
                total_dim += dims
        self.embed_fns = fns
        self.out_dim = total_dim

    def embed(self, inputs):
        """Apply every embedding fn and concatenate along the last dim."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
def get_embedder(multires):
    """Build a positional-encoding embedder with *multires* frequency bands.

    Returns the embedding callable and its output dimensionality.
    """
    config = {
        'include_input': True,
        'input_dims': 3,
        'max_freq_log2': multires - 1,
        'num_freqs': multires,
        'log_sampling': True,
        'periodic_fns': [torch.sin, torch.cos],
    }
    embedder_obj = Embedder(**config)

    def embed(x, eo=embedder_obj):
        return eo.embed(x)

    return embed, embedder_obj.out_dim
4,611 | from collections import deque
from termcolor import colored
def push_pop_octree(q, oct_item):
def format_octree_str(octree_byte, octree_path, level_idx, max_level):
def describe_octree(octree, level, limit_levels=None):
bit_counter = lambda x: bin(x).count('1')
level_idx, curr_level_remaining_cells, next_level_cells = 1, 1, 0
octree_paths = deque('*')
for oct_idx, octree_byte in enumerate(octree):
octree_path = push_pop_octree(octree_paths, octree_byte)
if limit_levels is None or level_idx in limit_levels:
print(format_octree_str(octree_byte, octree_path, level_idx, level))
curr_level_remaining_cells -= 1
next_level_cells += bit_counter(octree_byte)
if not curr_level_remaining_cells:
level_idx += 1
curr_level_remaining_cells = next_level_cells
next_level_cells = 0 | null |
4,612 | import torch
from kaolin import _C
def sided_distance(p1, p2):
    r"""For every point in ``p1``, find its nearest neighbour in ``p2``.

    Args:
        p1 (torch.Tensor): pointclouds of shape
            :math:`(\text{batch_size}, \text{num_points1}, 3)`.
        p2 (torch.Tensor): pointclouds of shape
            :math:`(\text{batch_size}, \text{num_points2}, 3)`.

    Returns:
        (torch.Tensor, torch.Tensor): the squared euclidean distances to,
        and indices of, the closest points in ``p2``, each of shape
        :math:`(\text{batch_size}, \text{num_points1})`.
    """
    return _SidedDistanceFunction.apply(p1, p2)
The provided code snippet includes necessary dependencies for implementing the `chamfer_distance` function. Write a Python function `def chamfer_distance(p1, p2, w1=1., w2=1., squared=True)` to solve the following problem:
r"""Computes the chamfer distance between two pointclouds, defined as following: :math:`\dfrac{w_1}{|P_1|}\sum\limits_{p_{1i} \in P_1}\min\limits_{p_{2j} \in P_2}(||p_{1i} - p_{2j}||_2^2) + \dfrac{w_2}{|P_2|}\sum\limits_{p_{2j} \in P_2}\min\limits_{p_{1i} \in P_1}(||p_{2j} - p_{1i}||_2^2)` Args: p1 (torch.Tensor): Pointclouds, of shape :math:`(\text{batch_size}, \text{num_points1}, 3)`. p2 (torch.Tensor): Pointclouds, of shape :math:`(\text{batch_size}, \text{num_points2}, 3)`. w1 (float, optional): Weighting of forward direction. Default: 1. w2 (float, optional): Weighting of backward direction. Default: 1. squared (bool, optional): Use the squared sided distance. Default: True. Returns: (torch.Tensor): Chamfer distance between two pointclouds p1 and p2, of shape :math:`(\text{batch_size})`. Example: >>> p1 = torch.tensor([[[8.8977, 4.1709, 1.2839], ... [8.5640, 7.7767, 9.4214]], ... [[0.5431, 6.4495, 11.4914], ... [3.2126, 8.0865, 3.1018]]], device='cuda', dtype=torch.float) >>> p2 = torch.tensor([[[6.9340, 6.1152, 3.4435], ... [0.1032, 9.8181, 11.3350]], ... [[11.4006, 2.2154, 7.9589], ... [4.2586, 1.4133, 7.2606]]], device='cuda', dtype=torch.float) >>> chamfer_distance(p1, p2) tensor([ 72.5838, 151.0809], device='cuda:0')
Here is the function:
def chamfer_distance(p1, p2, w1=1., w2=1., squared=True):
    r"""Weighted bi-directional chamfer distance between two pointclouds:

    :math:`\dfrac{w_1}{|P_1|}\sum_{p_{1i}}\min_{p_{2j}}||p_{1i}-p_{2j}||_2^2 +
    \dfrac{w_2}{|P_2|}\sum_{p_{2j}}\min_{p_{1i}}||p_{2j}-p_{1i}||_2^2`

    Args:
        p1 (torch.Tensor): pointclouds of shape (batch_size, num_points1, 3).
        p2 (torch.Tensor): pointclouds of shape (batch_size, num_points2, 3).
        w1 (float, optional): weight of the p1 -> p2 direction. Default: 1.
        w2 (float, optional): weight of the p2 -> p1 direction. Default: 1.
        squared (bool, optional): use squared sided distances. Default: True.

    Returns:
        (torch.Tensor): chamfer distance per batch element, shape (batch_size,).
    """
    to_p2 = sided_distance(p1, p2)[0]
    to_p1 = sided_distance(p2, p1)[0]
    if not squared:
        to_p2 = torch.sqrt(to_p2)
        to_p1 = torch.sqrt(to_p1)
    mean_to_p2 = to_p2.mean(dim=-1)
    mean_to_p1 = to_p1.mean(dim=-1)
    # Skip the multiplications entirely in the common unweighted case.
    if w1 == 1 and w2 == 1:
        return mean_to_p2 + mean_to_p1
    return w1 * mean_to_p2 + w2 * mean_to_p1
4,613 | import torch
from kaolin import _C
def sided_distance(p1, p2):
    r"""For every point in ``p1``, find its nearest neighbour in ``p2``.

    Args:
        p1 (torch.Tensor): pointclouds of shape
            :math:`(\text{batch_size}, \text{num_points1}, 3)`.
        p2 (torch.Tensor): pointclouds of shape
            :math:`(\text{batch_size}, \text{num_points2}, 3)`.

    Returns:
        (torch.Tensor, torch.Tensor): the squared euclidean distances to,
        and indices of, the closest points in ``p2``, each of shape
        :math:`(\text{batch_size}, \text{num_points1})`.
    """
    return _SidedDistanceFunction.apply(p1, p2)
The provided code snippet includes necessary dependencies for implementing the `f_score` function. Write a Python function `def f_score(gt_points, pred_points, radius=0.01, eps=1e-8)` to solve the following problem:
r"""Computes the f-score of two sets of points, with a hit defined by two point existing within a defined radius of each other. Args: gt_points (torch.Tensor): Ground truth pointclouds, of shape :math:`(\text{batch_size}, \text{num_gt_points}, 3)`. pred_points (torch.Tensor): Predicted points pointclouds, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. radius (float): Radius from a point to define a hit. Default: 0.01 eps (float): Epsilon used to calculate f score. Default: 1e-8. Returns: (torch.Tensor): Computed f-score tensor of shape :math:`(\text{batch_size})`, of same dtype as input pred_points. Example: >>> p1 = torch.tensor([[[8.8977, 4.1709, 1.2839], ... [8.5640, 7.7767, 9.4214]], ... [[0.5431, 6.4495, 11.4914], ... [3.2126, 8.0865, 3.1018]]], device='cuda', dtype=torch.float) >>> p2 = torch.tensor([[[9.4863, 4.2249, 0.1712], ... [8.1783, 8.5310, 8.5119]], ... [[-0.0020699, 6.4429, 12.3], ... [3.8386, 8.3585, 4.7662]]], device='cuda', dtype=torch.float) >>> f_score(p1, p2, radius=1) tensor([0.0000, 0.5000], device='cuda:0') >>> f_score(p1, p2, radius=1.5) tensor([1.0000, 0.5000], device='cuda:0')
Here is the function:
def f_score(gt_points, pred_points, radius=0.01, eps=1e-8):
    r"""Compute the f-score between two sets of points.

    A point counts as a hit when a point of the other cloud exists within
    ``radius`` of it: recall is measured from ``gt_points`` towards
    ``pred_points`` and precision from ``pred_points`` towards ``gt_points``.

    Args:
        gt_points (torch.Tensor): Ground truth pointclouds, of shape
            :math:`(\text{batch_size}, \text{num_gt_points}, 3)`.
        pred_points (torch.Tensor): Predicted pointclouds, of shape
            :math:`(\text{batch_size}, \text{num_points}, 3)`.
        radius (float): Radius from a point to define a hit. Default: 0.01.
        eps (float): Epsilon added to the f-score denominator for stability.
            Default: 1e-8.

    Returns:
        (torch.Tensor):
            Computed f-score tensor of shape :math:`(\text{batch_size})`,
            of same dtype as input pred_points.
    """
    # One-sided euclidean (not squared) distances in both directions.
    dist_gt_to_pred = torch.sqrt(sided_distance(gt_points, pred_points)[0])
    dist_pred_to_gt = torch.sqrt(sided_distance(pred_points, gt_points)[0])

    dtype = gt_points.dtype
    # Ground truth points with no prediction nearby are false negatives;
    # predicted points with no ground truth nearby are false positives.
    false_neg = torch.sum(dist_gt_to_pred > radius, dim=1).type(dtype)
    false_pos = torch.sum(dist_pred_to_gt > radius, dim=1).type(dtype)
    true_pos = (dist_pred_to_gt.shape[1] - false_pos).type(dtype)

    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    return 2 * (precision * recall) / (precision + recall + eps)
4,614 | import torch
from kaolin import _C
The provided code snippet includes necessary dependencies for implementing the `_sided_distance` function. Write a Python function `def _sided_distance(p1, p2)` to solve the following problem:
Pytorch version of sided distances for testing.
Here is the function:
def _sided_distance(p1, p2):
"""
Pytorch version of sided distances for testing.
"""
batch_size = p1.shape[0]
dists = (p1.reshape(batch_size, -1, 1, 3) - p2.reshape(batch_size, 1, -1, 3)) ** 2
dists = torch.sum(dists, dim=-1)
dist = torch.min(dists, dim=-1)
return dist.values | Pytorch version of sided distances for testing. |
4,615 | import torch
from kaolin import _C
from ..ops.mesh import uniform_laplacian
class _UnbatchedTriangleDistanceCuda(torch.autograd.Function):
    """Autograd wrapper over the CUDA point-to-triangle distance kernels.

    Fix: ``forward`` and ``backward`` are now declared as ``@staticmethod``,
    as required by the ``torch.autograd.Function`` API.
    """

    @staticmethod
    def forward(ctx, points, face_vertices):
        # points: (num_points, 3); face_vertices: (num_faces, 3, 3).
        num_points = points.shape[0]
        # Output buffers filled in-place by the CUDA kernel.
        min_dist = torch.zeros((num_points), device=points.device, dtype=points.dtype)
        min_dist_idx = torch.zeros((num_points), device=points.device, dtype=torch.long)
        dist_type = torch.zeros((num_points), device=points.device, dtype=torch.int32)
        _C.metrics.unbatched_triangle_distance_forward_cuda(
            points, face_vertices, min_dist, min_dist_idx, dist_type)
        # The backward kernel needs contiguous inputs plus the selected
        # face index and distance type per point.
        ctx.save_for_backward(points.contiguous(), face_vertices.contiguous(),
                              min_dist_idx, dist_type)
        # Indices and types are discrete outputs: no gradient flows through them.
        ctx.mark_non_differentiable(min_dist_idx, dist_type)
        return min_dist, min_dist_idx, dist_type

    @staticmethod
    def backward(ctx, grad_dist, grad_face_idx, grad_dist_type):
        points, face_vertices, face_idx, dist_type = ctx.saved_tensors
        grad_dist = grad_dist.contiguous()
        # Gradient buffers filled in-place by the CUDA kernel.
        grad_points = torch.zeros_like(points)
        grad_face_vertices = torch.zeros_like(face_vertices)
        _C.metrics.unbatched_triangle_distance_backward_cuda(
            grad_dist, points, face_vertices, face_idx, dist_type,
            grad_points, grad_face_vertices)
        return grad_points, grad_face_vertices
def _unbatched_naive_point_to_mesh_distance(points, face_vertices):
    """Pure-PyTorch point-to-triangle-mesh squared distance (naive O(P*F)).

    For each point, computes the squared euclidean distance to every face and
    keeps the minimum, then recomputes the winning distance from the selected
    face only, so autograd backpropagates through O(num_points) terms instead
    of O(num_points * num_faces).

    description of distance type:
        - 0: distance to face
        - 1: distance to vertex 0
        - 2: distance to vertex 1
        - 3: distance to vertex 2
        - 4: distance to edge 0-1
        - 5: distance to edge 1-2
        - 6: distance to edge 2-0

    Args:
        points (torch.Tensor): of shape (num_points, 3).
        face_vertices (torch.LongTensor): of shape (num_faces, 3, 3).

    Returns:
        (torch.Tensor, torch.LongTensor, torch.IntTensor):
            - distance, of shape (num_points).
            - face_idx, of shape (num_points).
            - distance_type, of shape (num_points).
    """
    num_points = points.shape[0]
    num_faces = face_vertices.shape[0]
    device = points.device
    dtype = points.dtype
    # Triangle corners and edge vectors (one row per face).
    v1 = face_vertices[:, 0]
    v2 = face_vertices[:, 1]
    v3 = face_vertices[:, 2]
    e21 = v2 - v1
    e32 = v3 - v2
    e13 = v1 - v3
    normals = -torch.cross(e21, e13)
    # Normalized projection parameter of every point onto every edge
    # (broadcast to shape (num_points, num_faces)).
    uab = _project_edge(v1.view(1, -1, 3), e21.view(1, -1, 3), points.view(-1, 1, 3))
    ubc = _project_edge(v2.view(1, -1, 3), e32.view(1, -1, 3), points.view(-1, 1, 3))
    uca = _project_edge(v3.view(1, -1, 3), e13.view(1, -1, 3), points.view(-1, 1, 3))
    # Classify each (point, face) pair: closest feature is a vertex (1-3),
    # an edge (4-6), or the triangle interior (0) — see docstring table.
    is_type1 = (uca > 1.) & (uab < 0.)
    is_type2 = (uab > 1.) & (ubc < 0.)
    is_type3 = (ubc > 1.) & (uca < 0.)
    is_type4 = (uab >= 0.) & (uab <= 1.) & _is_not_above(v1, e21, normals, points)
    is_type5 = (ubc >= 0.) & (ubc <= 1.) & _is_not_above(v2, e32, normals, points)
    is_type6 = (uca >= 0.) & (uca <= 1.) & _is_not_above(v3, e13, normals, points)
    is_type0 = ~(is_type1 | is_type2 | is_type3 | is_type4 | is_type5 | is_type6)
    face_idx = torch.zeros(num_points, device=device, dtype=torch.long)
    # Closest point on each face, for each input point.
    all_closest_points = torch.zeros((num_points, num_faces, 3), device=device,
                                     dtype=dtype)
    all_type0_idx = torch.where(is_type0)
    all_type1_idx = torch.where(is_type1)
    all_type2_idx = torch.where(is_type2)
    all_type3_idx = torch.where(is_type3)
    all_type4_idx = torch.where(is_type4)
    all_type5_idx = torch.where(is_type5)
    all_type6_idx = torch.where(is_type6)
    # Encode the type masks as a single integer label per (point, face) pair;
    # the masks are mutually exclusive so the sum is the type id.
    all_types = is_type1.int() + is_type2.int() * 2 + is_type3.int() * 3 + \
        is_type4.int() * 4 + is_type5.int() * 5 + is_type6.int() * 6
    # Fill the closest point per pair according to its type:
    # plane projection (0), triangle vertex (1-3), or point along an edge (4-6).
    all_closest_points[all_type0_idx] = _project_plane(
        v1[all_type0_idx[1]], normals[all_type0_idx[1]], points[all_type0_idx[0]])
    all_closest_points[all_type1_idx] = v1.view(-1, 3)[all_type1_idx[1]]
    all_closest_points[all_type2_idx] = v2.view(-1, 3)[all_type2_idx[1]]
    all_closest_points[all_type3_idx] = v3.view(-1, 3)[all_type3_idx[1]]
    all_closest_points[all_type4_idx] = _point_at(v1[all_type4_idx[1]], e21[all_type4_idx[1]],
                                                  uab[all_type4_idx])
    all_closest_points[all_type5_idx] = _point_at(v2[all_type5_idx[1]], e32[all_type5_idx[1]],
                                                  ubc[all_type5_idx])
    all_closest_points[all_type6_idx] = _point_at(v3[all_type6_idx[1]], e13[all_type6_idx[1]],
                                                  uca[all_type6_idx])
    # Squared distance from each point to its closest point on every face,
    # then keep the nearest face.
    all_vec = (all_closest_points - points.view(-1, 1, 3))
    all_dist = _compute_dot(all_vec, all_vec)
    _, min_dist_idx = torch.min(all_dist, dim=-1)
    dist_type = all_types[torch.arange(num_points, device=device), min_dist_idx]
    # NOTE(review): this synchronize assumes CUDA is available even though the
    # computation above is pure PyTorch — confirm behavior on CPU-only builds.
    torch.cuda.synchronize()
    # Recompute the shortest distances from the selected faces only.
    # This reduces the backward pass to the closest faces instead of all faces:
    # O(num_points) vs O(num_points * num_faces).
    selected_face_vertices = face_vertices[min_dist_idx]
    v1 = selected_face_vertices[:, 0]
    v2 = selected_face_vertices[:, 1]
    v3 = selected_face_vertices[:, 2]
    e21 = v2 - v1
    e32 = v3 - v2
    e13 = v1 - v3
    normals = -torch.cross(e21, e13)
    uab = _project_edge(v1, e21, points)
    ubc = _project_edge(v2, e32, points)
    uca = _project_edge(v3, e13, points)
    # Closest counterpart point on the winning face, per distance type.
    counter_p = torch.zeros((num_points, 3), device=device, dtype=dtype)
    cond = (dist_type == 1)
    counter_p[cond] = v1[cond]
    cond = (dist_type == 2)
    counter_p[cond] = v2[cond]
    cond = (dist_type == 3)
    counter_p[cond] = v3[cond]
    cond = (dist_type == 4)
    counter_p[cond] = _point_at(v1, e21, uab)[cond]
    cond = (dist_type == 5)
    counter_p[cond] = _point_at(v2, e32, ubc)[cond]
    cond = (dist_type == 6)
    counter_p[cond] = _point_at(v3, e13, uca)[cond]
    cond = (dist_type == 0)
    counter_p[cond] = _project_plane(v1, normals, points)[cond]
    min_dist = torch.sum((counter_p - points) ** 2, dim=-1)
    return min_dist, min_dist_idx, dist_type
The provided code snippet includes necessary dependencies for implementing the `point_to_mesh_distance` function. Write a Python function `def point_to_mesh_distance(pointclouds, face_vertices)` to solve the following problem:
r"""Computes the distances from pointclouds to meshes (represented by vertices and faces). For each point in the pointcloud, it finds the nearest triangle in the mesh, and calculated its distance to that triangle. .. note:: The calculated distance is the squared euclidean distance. Type 0 indicates the distance is from a point on the surface of the triangle. Type 1 to 3 indicates the distance is from a point to a vertices. Type 4 to 6 indicates the distance is from a point to an edge. Args: pointclouds (torch.Tensor): pointclouds, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. face_vertices (torch.Tensor): vertices of each face of meshes, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`. Returns: (torch.Tensor, torch.LongTensor, torch.IntTensor): - Distances between pointclouds and meshes, of shape :math:`(\text{batch_size}, \text{num_points})`. - face indices selected, of shape :math:`(\text{batch_size}, \text{num_points})`. - Types of distance of shape :math:`(\text{batch_size}, \text{num_points})`. Example: >>> from kaolin.ops.mesh import index_vertices_by_faces >>> point = torch.tensor([[[0.5, 0.5, 0.5], ... [3., 4., 5.]]], device='cuda') >>> vertices = torch.tensor([[[0., 0., 0.], ... [0., 1., 0.], ... [0., 0., 1.]]], device='cuda') >>> faces = torch.tensor([[0, 1, 2]], dtype=torch.long, device='cuda') >>> face_vertices = index_vertices_by_faces(vertices, faces) >>> distance, index, dist_type = point_to_mesh_distance(point, face_vertices) >>> distance tensor([[ 0.2500, 41.0000]], device='cuda:0') >>> index tensor([[0, 0]], device='cuda:0') >>> dist_type tensor([[5, 5]], device='cuda:0', dtype=torch.int32)
Here is the function:
def point_to_mesh_distance(pointclouds, face_vertices):
    r"""Computes the distances from pointclouds to meshes (represented by vertices and faces).

    For each point in the pointcloud, it finds the nearest triangle
    in the mesh, and calculates its distance to that triangle.

    .. note::
        The calculated distance is the squared euclidean distance.

    Type 0 indicates the distance is from a point on the surface of the triangle.
    Type 1 to 3 indicates the distance is from a point to a vertices.
    Type 4 to 6 indicates the distance is from a point to an edge.

    Args:
        pointclouds (torch.Tensor):
            pointclouds, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`.
        face_vertices (torch.Tensor):
            vertices of each face of meshes,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`.

    Returns:
        (torch.Tensor, torch.LongTensor, torch.IntTensor):

            - Distances between pointclouds and meshes,
              of shape :math:`(\text{batch_size}, \text{num_points})`.
            - face indices selected, of shape :math:`(\text{batch_size}, \text{num_points})`.
            - Types of distance of shape :math:`(\text{batch_size}, \text{num_points})`.
    """
    batch_size = pointclouds.shape[0]
    # The CUDA kernel and the pure-PyTorch fallback share the same unbatched
    # signature; pick one per device and apply it to each batch element.
    if pointclouds.is_cuda:
        unbatched_fn = _UnbatchedTriangleDistanceCuda.apply
    else:
        unbatched_fn = _unbatched_naive_point_to_mesh_distance
    per_batch = [unbatched_fn(pointclouds[i], face_vertices[i])
                 for i in range(batch_size)]
    distances, face_indices, dist_types = zip(*per_batch)
    return (torch.stack(distances, dim=0),
            torch.stack(face_indices, dim=0),
            torch.stack(dist_types, dim=0))
4,616 | import torch
from kaolin import _C
from ..ops.mesh import uniform_laplacian
The provided code snippet includes necessary dependencies for implementing the `average_edge_length` function. Write a Python function `def average_edge_length(vertices, faces)` to solve the following problem:
r"""Returns the average length of each faces in a mesh. Args: vertices (torch.Tensor): Batched vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.LongTensor): Faces, of shape :math:`(\text{num_faces}, 3)`. Returns: (torch.Tensor): average length of each edges in a face, of shape :math:`(\text{batch_size}, \text{num_faces})`. Example: >>> vertices = torch.tensor([[[1, 0, 0], ... [0, 1, 0], ... [0, 0, 1]]], dtype=torch.float) >>> faces = torch.tensor([[0, 1, 2]]) >>> average_edge_length(vertices, faces) tensor([[1.4142]])
Here is the function:
def average_edge_length(vertices, faces):
    r"""Returns the average edge length of each face in a mesh.

    Args:
        vertices (torch.Tensor): Batched vertices, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.LongTensor): Faces, of shape :math:`(\text{num_faces}, 3)`.

    Returns:
        (torch.Tensor):
            average length of the edges of each face, of shape
            :math:`(\text{batch_size}, \text{num_faces})`.

    Example:
        >>> vertices = torch.tensor([[[1, 0, 0],
        ...                           [0, 1, 0],
        ...                           [0, 0, 1]]], dtype=torch.float)
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> average_edge_length(vertices, faces)
        tensor([[1.4142]])
    """
    # Gather the three corners of every face: (batch_size, num_faces, 3).
    corner0 = torch.index_select(vertices, 1, faces[:, 0])
    corner1 = torch.index_select(vertices, 1, faces[:, 1])
    corner2 = torch.index_select(vertices, 1, faces[:, 2])
    # Length of each of the three edges of every face.
    len_01 = torch.sqrt(torch.sum((corner1 - corner0) ** 2, dim=2))
    len_02 = torch.sqrt(torch.sum((corner2 - corner0) ** 2, dim=2))
    len_21 = torch.sqrt(torch.sum((corner1 - corner2) ** 2, dim=2))
    return (len_01 + len_02 + len_21) / 3.
4,617 | import torch
from kaolin import _C
from ..ops.mesh import uniform_laplacian
The provided code snippet includes necessary dependencies for implementing the `uniform_laplacian_smoothing` function. Write a Python function `def uniform_laplacian_smoothing(vertices, faces)` to solve the following problem:
r"""Calculates the uniform laplacian smoothing of meshes. The position of updated vertices is defined as :math:`V_i = \frac{1}{N} * \sum^{N}_{j=1}V_j`, where :math:`N` is the number of neighbours of :math:`V_i`, :math:`V_j` is the position of the j-th adjacent vertex. Args: vertices (torch.Tensor): Vertices of the meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.LongTensor): Faces of the meshes, of shape :math:`(\text{num_faces}, \text{face_size})`. Returns: (torch.FloatTensor): smoothed vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. Example: >>> vertices = torch.tensor([[[1, 0, 0], ... [0, 1, 0], ... [0, 0, 1]]], dtype=torch.float) >>> faces = torch.tensor([[0, 1, 2]]) >>> uniform_laplacian_smoothing(vertices, faces) tensor([[[0.0000, 0.5000, 0.5000], [0.5000, 0.0000, 0.5000], [0.5000, 0.5000, 0.0000]]])
Here is the function:
def uniform_laplacian_smoothing(vertices, faces):
    r"""Calculates the uniform laplacian smoothing of meshes.

    The position of updated vertices is defined as
    :math:`V_i = \frac{1}{N} * \sum^{N}_{j=1}V_j`, where :math:`N` is the
    number of neighbours of :math:`V_i` and :math:`V_j` is the position of
    the j-th adjacent vertex.

    Args:
        vertices (torch.Tensor):
            Vertices of the meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.LongTensor):
            Faces of the meshes, of shape :math:`(\text{num_faces}, \text{face_size})`.

    Returns:
        (torch.FloatTensor):
            smoothed vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`.

    Example:
        >>> vertices = torch.tensor([[[1, 0, 0],
        ...                           [0, 1, 0],
        ...                           [0, 0, 1]]], dtype=torch.float)
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> uniform_laplacian_smoothing(vertices, faces)
        tensor([[[0.0000, 0.5000, 0.5000],
                 [0.5000, 0.0000, 0.5000],
                 [0.5000, 0.5000, 0.0000]]])
    """
    num_vertices = vertices.shape[1]
    # L @ V yields the offset of each vertex towards its neighbourhood mean,
    # so adding the original positions gives the smoothed vertices.
    laplacian = uniform_laplacian(num_vertices, faces).to(vertices.dtype)
    return torch.matmul(laplacian, vertices) + vertices
4,618 | import torch
The provided code snippet includes necessary dependencies for implementing the `mask_iou` function. Write a Python function `def mask_iou(lhs_mask, rhs_mask)` to solve the following problem:
r"""Compute the Intersection over Union of two segmentation masks. Args: lhs_mask (torch.FloatTensor): A segmentation mask, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`. rhs_mask (torch.FloatTensor): A segmentation mask, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`. Returns: (torch.FloatTensor): The IoU loss, as a torch scalar.
Here is the function:
def mask_iou(lhs_mask, rhs_mask):
    r"""Compute the Intersection over Union loss of two segmentation masks.

    Args:
        lhs_mask (torch.FloatTensor):
            A segmentation mask, of shape
            :math:`(\text{batch_size}, \text{height}, \text{width})`.
        rhs_mask (torch.FloatTensor):
            A segmentation mask, of shape
            :math:`(\text{batch_size}, \text{height}, \text{width})`.

    Returns:
        (torch.FloatTensor): The IoU loss, as a torch scalar.
    """
    # Unpacking also enforces that the masks are 3-dimensional.
    batch_size, _height, _width = lhs_mask.shape
    assert rhs_mask.shape == lhs_mask.shape
    intersection = lhs_mask * rhs_mask
    # Inclusion-exclusion: union = lhs + rhs - intersection.
    union = lhs_mask + rhs_mask - intersection
    inter_area = intersection.reshape(batch_size, -1).sum(dim=1)
    union_area = union.reshape(batch_size, -1).sum(dim=1)
    # Small epsilon keeps the ratio finite for empty masks.
    iou = inter_area / (union_area + 1e-10)
    return 1.0 - torch.mean(iou)
4,619 | import torch
from kaolin.ops.mesh.tetmesh import _validate_tet_vertices
def tetrahedron_volume(tet_vertices):
    r"""Compute the (signed) volume of tetrahedrons.

    Args:
        tet_vertices (torch.Tensor):
            Batched tetrahedrons, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.

    Returns:
        (torch.Tensor):
            volume of each tetrahedron in each mesh, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons})`.

    Example:
        >>> tet_vertices = torch.tensor([[[[0.5000, 0.5000, 0.4500],
        ...                                [0.4500, 0.5000, 0.5000],
        ...                                [0.4750, 0.4500, 0.4500],
        ...                                [0.5000, 0.5000, 0.5000]]]])
        >>> tetrahedron_volume(tet_vertices)
        tensor([[-2.0833e-05]])
    """
    _validate_tet_vertices(tet_vertices)
    # Split into the four corners, each of shape (batch_size, num_tetrahedrons, 3).
    a, b, c, d = (corner.squeeze(2)
                  for corner in torch.split(tet_vertices, 1, dim=2))
    # Scalar triple product: V = (a - d) . ((b - d) x (c - d)) / 6.
    triple = ((a - d) * torch.cross(input=(b - d), other=(c - d), dim=2)).sum(dim=2)
    return triple / 6
def _validate_tet_vertices(tet_vertices):
r"""Helper method to validate the dimensions of the batched tetrahedrons tensor.
Args:
tet_vertices (torch.Tensor):
Batched tetrahedrons, of shape
:math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.
"""
assert tet_vertices.ndim == 4, \
f"tetrahedrons has {tetrahedrons.ndim} but must have 4 dimensions."
assert tet_vertices.shape[2] == 4, \
f"The third dimension of the tetrahedrons must be 4 " \
f"but the input has {tetrahedrons.shape[2]}. Each tetrahedron has 4 vertices."
assert tet_vertices.shape[3] == 3, \
f"The fourth dimension of the tetrahedrons must be 3 " \
f"but the input has {tetrahedrons.shape[3]}. Each vertex must have 3 dimensions."
The provided code snippet includes necessary dependencies for implementing the `equivolume` function. Write a Python function `def equivolume(tet_vertices, tetrahedrons_mean=None, pow=4)` to solve the following problem:
r"""Compute the EquiVolume loss as devised by *Gao et al.* in `Learning Deformable Tetrahedral Meshes for 3D Reconstruction <https://nv-tlabs.github.io/DefTet/>`_ NeurIPS 2020. See `supplementary material <https://nv-tlabs.github.io/DefTet/files/supplement.pdf>`_ for the definition of the loss function. Args: tet_vertices (torch.Tensor): Batched tetrahedrons, of shape :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`. tetrahedrons_mean (torch.Tensor): Mean volume of all tetrahedrons in a grid, of shape :math:`(\text{batch_size})` or :math:`(1,)` (broadcasting). Default: Compute ``torch.mean(tet_vertices, dim=-1)``. pow (int): Power for the equivolume loss. Increasing power puts more emphasis on the larger tetrahedron deformation. Default: 4. Returns: (torch.Tensor): EquiVolume loss for each mesh, of shape :math:`(\text{batch_size})`. Example: >>> tet_vertices = torch.tensor([[[[0.5000, 0.5000, 0.7500], ... [0.4500, 0.8000, 0.6000], ... [0.4750, 0.4500, 0.2500], ... [0.5000, 0.3000, 0.3000]], ... [[0.4750, 0.4500, 0.2500], ... [0.5000, 0.9000, 0.3000], ... [0.4500, 0.4000, 0.9000], ... [0.4500, 0.4500, 0.7000]]], ... [[[0.7000, 0.3000, 0.4500], ... [0.4800, 0.2000, 0.3000], ... [0.9000, 0.4500, 0.4500], ... [0.2000, 0.5000, 0.1000]], ... [[0.3750, 0.4500, 0.2500], ... [0.9000, 0.8000, 0.7000], ... [0.6000, 0.9000, 0.3000], ... [0.5500, 0.3500, 0.9000]]]]) >>> equivolume(tet_vertices, pow=4) tensor([[2.2961e-10], [7.7704e-10]])
Here is the function:
def equivolume(tet_vertices, tetrahedrons_mean=None, pow=4):
    r"""Compute the EquiVolume loss as devised by *Gao et al.* in `Learning Deformable Tetrahedral Meshes for 3D
    Reconstruction <https://nv-tlabs.github.io/DefTet/>`_ NeurIPS 2020.

    See `supplementary material <https://nv-tlabs.github.io/DefTet/files/supplement.pdf>`_
    for the definition of the loss function.

    Args:
        tet_vertices (torch.Tensor):
            Batched tetrahedrons, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.
        tetrahedrons_mean (torch.Tensor):
            Mean volume of all tetrahedrons in a grid,
            of shape :math:`(\text{batch_size})` or :math:`(1,)` (broadcasting).
            Default: Compute ``torch.mean(tet_vertices, dim=-1)``.
        pow (int):
            Power for the equivolume loss.
            Increasing power puts more emphasis on the larger tetrahedron deformation.
            Default: 4.

    Returns:
        (torch.Tensor):
            EquiVolume loss for each mesh, of shape :math:`(\text{batch_size})`.
    """
    _validate_tet_vertices(tet_vertices)
    volumes = tetrahedron_volume(tet_vertices)
    if tetrahedrons_mean is None:
        # Default reference: mean volume over the tetrahedrons of each grid.
        tetrahedrons_mean = torch.mean(volumes, dim=-1).reshape(1, -1)
    # Penalize deviation from the mean volume, raised to `pow` to emphasize
    # the largest deformations.
    deviation = torch.abs(volumes - tetrahedrons_mean)
    return torch.mean(deviation ** pow, dim=-1, keepdim=True)
4,620 | import torch
from kaolin.ops.mesh.tetmesh import _validate_tet_vertices
def _validate_tet_vertices(tet_vertices):
r"""Helper method to validate the dimensions of the batched tetrahedrons tensor.
Args:
tet_vertices (torch.Tensor):
Batched tetrahedrons, of shape
:math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.
"""
assert tet_vertices.ndim == 4, \
f"tetrahedrons has {tetrahedrons.ndim} but must have 4 dimensions."
assert tet_vertices.shape[2] == 4, \
f"The third dimension of the tetrahedrons must be 4 " \
f"but the input has {tetrahedrons.shape[2]}. Each tetrahedron has 4 vertices."
assert tet_vertices.shape[3] == 3, \
f"The fourth dimension of the tetrahedrons must be 3 " \
f"but the input has {tetrahedrons.shape[3]}. Each vertex must have 3 dimensions."
The provided code snippet includes necessary dependencies for implementing the `amips` function. Write a Python function `def amips(tet_vertices, inverse_offset_matrix)` to solve the following problem:
r"""Compute the AMIPS (Advanced MIPS) loss as devised by *Fu et al.* in `Computing Locally Injective Mappings by Advanced MIPS. \ <https://www.microsoft.com/en-us/research/publication/computing-locally-injective-mappings-advanced-mips/>`_ ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2015. The Jacobian can be derived as: :math:`J = (g(x) - g(x_0)) / (x - x_0)` Only components where the determinant of the Jacobian is positive, are included in the calculation of AMIPS. This is because the AMIPS Loss is only defined for tetrahedrons whose determinant of the Jacobian is positive. Args: tet_vertices (torch.Tensor): Batched tetrahedrons, of shape :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`. inverse_offset_matrix (torch.LongTensor): The inverse of the offset matrix is of shape :math:`(\text{batch_size}, \text{num_tetrahedrons}, 3, 3)`. Refer to :func:`kaolin.ops.mesh.tetmesh.inverse_vertices_offset`. Returns: (torch.Tensor): AMIPS loss for each mesh, of shape :math:`(\text{batch_size})`. Example: >>> tet_vertices = torch.tensor([[[[1.7000, 2.3000, 4.4500], ... [3.4800, 0.2000, 5.3000], ... [4.9000, 9.4500, 6.4500], ... [6.2000, 8.5000, 7.1000]], ... [[-1.3750, 1.4500, 3.2500], ... [4.9000, 1.8000, 2.7000], ... [3.6000, 1.9000, 2.3000], ... [1.5500, 1.3500, 2.9000]]], ... [[[1.7000, 2.3000, 4.4500], ... [3.4800, 0.2000, 5.3000], ... [4.9000, 9.4500, 6.4500], ... [6.2000, 8.5000, 7.1000]], ... [[-1.3750, 1.4500, 3.2500], ... [4.9000, 1.8000, 2.7000], ... [3.6000, 1.9000, 2.3000], ... [1.5500, 1.3500, 2.9000]]]]) >>> inverse_offset_matrix = torch.tensor([[[[ -1.1561, -1.1512, -1.9049], ... [1.5138, 1.0108, 3.4302], ... [1.6538, 1.0346, 4.2223]], ... [[ 2.9020, -1.0995, -1.8744], ... [ 1.1554, 1.1519, 1.7780], ... [-0.0766, 1.6350, 1.1064]]], ... [[[-0.9969, 1.4321, -0.3075], ... [-1.3414, 1.5795, -1.6571], ... [-0.1775, -0.4349, 1.1772]], ... [[-1.1077, -1.2441, 1.8037], ... [-0.5722, 0.1755, -2.4364], ... 
[-0.5263, 1.5765, 1.5607]]]]) >>> amips(tet_vertices, inverse_offset_matrix) tensor([[13042.3408], [ 2376.2517]])
Here is the function:
def amips(tet_vertices, inverse_offset_matrix):
    r"""Compute the AMIPS (Advanced MIPS) loss of *Fu et al.*,
    `Computing Locally Injective Mappings by Advanced MIPS
    <https://www.microsoft.com/en-us/research/publication/computing-locally-injective-mappings-advanced-mips/>`_,
    ACM TOG (SIGGRAPH 2015).

    The Jacobian of each tetrahedron is :math:`J = (g(x) - g(x_0)) / (x - x_0)`.
    Only tetrahedrons whose Jacobian has a non-negative determinant contribute,
    since the AMIPS energy is only defined for positively-oriented tetrahedrons.

    Args:
        tet_vertices (torch.Tensor):
            Batched tetrahedrons, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.
        inverse_offset_matrix (torch.LongTensor):
            The inverse of the offset matrix, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons}, 3, 3)`.
            Refer to :func:`kaolin.ops.mesh.tetmesh.inverse_vertices_offset`.

    Returns:
        (torch.Tensor): AMIPS loss for each mesh, of shape :math:`(\text{batch_size}, 1)`.
    """
    _validate_tet_vertices(tet_vertices)
    # Offsets of vertices B, C, D relative to vertex A.
    vert_a, vert_b, vert_c, vert_d = torch.split(
        tet_vertices, split_size_or_sections=1, dim=2)
    offsets = torch.cat(
        [vert_b - vert_a, vert_c - vert_a, vert_d - vert_a], dim=2)
    # Jacobian of the deformation mapping each tetrahedron to a regular one.
    jac = torch.matmul(offsets, inverse_offset_matrix)
    det = torch.det(jac)
    # trace(J J^T), i.e. the sum of squared entries of J.
    gram = torch.matmul(jac, torch.transpose(jac, -2, -1))
    trace = torch.diagonal(gram, dim1=-2, dim2=-1).sum(-1)
    # det(J)^(2/3), with a small epsilon guarding against division by zero.
    EPS = 1e-10
    denom = torch.pow(torch.pow(det, 2) + EPS, 1 / 3)
    # Mask out tetrahedrons with a negative Jacobian determinant.
    valid = (det >= 0).float()
    return torch.mean(torch.div(trace, denom) * valid, dim=1, keepdim=True)
[-0.5263, 1.5765, 1.5607]]]]) >>> amips(tet_vertices, inverse_offset_matrix) tensor([[13042.3408], [ 2376.2517]]) |
4,621 | import torch
The provided code snippet includes necessary dependencies for implementing the `iou` function. Write a Python function `def iou(pred, gt)` to solve the following problem:
r"""Computes IoU across two voxelgrids Arguments: pred (torch.Tensor): predicted (binary) voxelgrids, of shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. gt (torch.Tensor): ground-truth (binary) voxelgrids, of shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. Returns: (torch.FloatTensor): the intersection over union value. Example: >>> pred = torch.tensor([[[[0., 0.], ... [1., 1.]], ... [[1., 1.], ... [1., 1.]]]]) >>> gt = torch.ones((1,2,2,2)) >>> iou(pred, gt) tensor([0.7500])
Here is the function:
def iou(pred, gt):
    r"""Computes IoU (intersection over union) across two voxelgrids.

    Arguments:
        pred (torch.Tensor): predicted (binary) voxelgrids, of shape
            :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`.
        gt (torch.Tensor): ground-truth (binary) voxelgrids, of shape
            :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`.

    Returns:
        (torch.FloatTensor): the intersection over union value, per batch entry.

    Raises:
        ValueError: if the two voxelgrids differ in shape.
    """
    if pred.shape != gt.shape:
        raise ValueError(
            f"Expected predicted voxelgrids and ground truth voxelgrids to have "
            f"the same shape, but got {pred.shape} for predicted and {gt.shape} for ground truth.")
    pred_mask = pred.bool()
    gt_mask = gt.bool()
    # Reduce over the three spatial axes, keeping the batch axis.
    spatial_dims = (1, 2, 3)
    overlap = torch.logical_and(pred_mask, gt_mask).sum(dim=spatial_dims).float()
    combined = torch.logical_or(pred_mask, gt_mask).sum(dim=spatial_dims).float()
    return overlap / combined
4,622 | import math
import torch
from kaolin import _C
def _dot(a, b):
"""Compute dot product of two tensors on the last axis."""
return torch.sum(a * b, dim=-1, keepdim=True)
def _ggx_v1(m2, nDotX):
"""Helper for computing the Smith visibility term with Trowbridge-Reitz (GGX) distribution"""
return 1. / (nDotX + torch.sqrt(m2 + (1. - m2) * nDotX * nDotX))
def sg_distribution_term(direction, roughness):
    r"""Returns spherical gaussians approximation of the
    `Trowbridge-Reitz`_ (GGX) distribution used in the Cook-Torrance specular BRDF.

    Use a single lobe to approximate the distribution.

    Args:
        direction (torch.Tensor):
            The normal directions, of shape :math:`(\text{num_points}, 3)`.
        roughness (torch.Tensor):
            The roughness of the surface, of shape :math:`(\text{num_points},)`.

    Returns:
        (torch.Tensor, torch.Tensor, torch.Tensor):

        - The amplitude of the spherical gaussians, of shape :math:`(\text{num_points}, 3)`.
        - The input ``direction``.
        - The sharpness of the spherical gaussians, of shape :math:`(\text{num_points},)`.

    .. _Trowbridge-Reitz:
        https://opg.optica.org/josa/abstract.cfm?uri=josa-65-5-531
    """
    # Bugfix: the original assert ended with ``direction.shape[-1]``, which is
    # truthy for any non-zero size; directions must be 3D cartesian vectors.
    assert direction.ndim == 2 and direction.shape[-1] == 3
    assert roughness.shape == direction.shape[:1]
    m2 = roughness * roughness
    sharpness = 2. / m2
    # GGX normalization 1 / (pi * m^2), replicated across RGB channels.
    amplitude = (1. / (math.pi * m2)).unsqueeze(-1).expand(-1, 3)
    return amplitude, direction, sharpness
def sg_warp_distribution(amplitude, direction, sharpness, view):
    r"""Generate spherical gaussians that best represent the normal distribution
    function but with its axis oriented in the direction of the current BRDF slice.

    Uses the warping operator from `Wang et al`_.

    Args:
        amplitude (torch.Tensor):
            The amplitudes of the spherical gaussians to be warped,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.Tensor):
            The directions of the spherical gaussians to be warped,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.Tensor):
            The sharpness of the spherical gaussians to be warped,
            of shape :math:`(\text{num_sg},)`.
        view (torch.Tensor): The view direction, of shape :math:`(\text{num_sg}, 3)`.

    Returns:
        (torch.Tensor, torch.Tensor, torch.Tensor):

        - The input ``amplitude``.
        - The warped direction, of shape :math:`(\text{num_sg}, 3)`.
        - The warped sharpness, of shape :math:`(\text{num_sg},)`.

    .. _Wang et al:
        https://www.microsoft.com/en-us/research/wp-content/uploads/2009/12/sg.pdf
    """
    assert amplitude.ndim == 2 and amplitude.shape[-1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert view.shape == amplitude.shape
    # Reflect the (negated) view about the lobe axis to get the warped axis.
    warped_axis = _reflect(-view, direction)
    # Clamp keeps the denominator away from zero at grazing angles.
    # NOTE(review): DIB-R++ does not apply this clamping — unclear if it matters.
    cos_dv = torch.clamp(_dot(direction, view).squeeze(-1), min=1e-4)
    warped_sharpness = sharpness / (4. * cos_dv)
    return amplitude, warped_axis, warped_sharpness
def fresnel(ldh, spec_albedo):
    """Schlick's approximation of the Fresnel reflectance term."""
    one_minus_cos = 1. - ldh
    return spec_albedo + (1. - spec_albedo) * torch.pow(one_minus_cos, 5)
def unbatched_reduced_sg_inner_product(amplitude, direction, sharpness,
                                       other_amplitude, other_direction, other_sharpness):
    r"""Fused equivalent of ``unbatched_sg_inner_product(...).sum(1)``.

    Being fused, it is faster and consumes less memory, especially at scale.

    Args:
        amplitude (torch.FloatTensor): amplitude of left hand-side sg,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.FloatTensor): direction of left hand-side sg,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.FloatTensor): sharpness of left hand-side sg,
            of shape :math:`(\text{num_sg},)`.
        other_amplitude (torch.FloatTensor): amplitude of right hand-side sg,
            of shape :math:`(\text{num_other}, 3)`.
        other_direction (torch.FloatTensor): direction of right hand-side sg,
            of shape :math:`(\text{num_other}, 3)`.
        other_sharpness (torch.FloatTensor): sharpness of right hand-side sg,
            of shape :math:`(\text{num_other},)`.

    Return:
        (torch.FloatTensor): a reduced output, of shape :math:`(\text{num_sg}, 3)`.
    """
    assert amplitude.ndim == 2 and amplitude.shape[1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert other_amplitude.ndim == 2 and other_amplitude.shape[1] == 3
    assert other_direction.shape == other_amplitude.shape
    assert other_sharpness.shape == other_amplitude.shape[:1]
    # The fused custom autograd kernel only pays off with enough
    # right-hand-side lobes; otherwise fall back to the naive path.
    if other_amplitude.shape[0] < 8:
        return unbatched_sg_inner_product(
            amplitude, direction, sharpness,
            other_amplitude, other_direction, other_sharpness
        ).sum(1)
    return UnbatchedReducedSgInnerProduct.apply(
        amplitude, direction, sharpness,
        other_amplitude, other_direction, other_sharpness
    )
The provided code snippet includes necessary dependencies for implementing the `sg_warp_specular_term` function. Write a Python function `def sg_warp_specular_term(amplitude, direction, sharpness, normal, roughness, view, spec_albedo)` to solve the following problem:
r"""Computes the specular reflectance from a spherical gaussians lobes representing incoming radiance, using the Cook-Torrance microfacet specular shading model. Args: amplitude (torch.Tensor): The amplitudes of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg}, 3)`. direction (torch.Tensor): The directions of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg}, 3)`. sharpness (torch.Tensor): The sharpness of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg},)`. normal (torch.Tensor): The normal of the surface points where the specular reflectance is to be estimated, of shape :math:`(\text{num_points}, 3)`. roughness (torch.Tensor): The roughness of the surface points where the specular reflectance is to be estimated, of shape :math:`(\text{num_points})`. view (torch.Tensor): The direction toward the camera from the surface points where the specular reflectance is to be estimated, of shape :math:`(\text{num_points}, 3)`. spec_albedo (torch.Tensor): The specular albedo (RGB color) of the surface points where the specular reflectance is to be estimated, of shape :math:`(\text{num_points}, 3)`. Returns: (torch.Tensor): The specular reflectance, of shape :math:`(\text{num_points}, 3)`.
Here is the function:
def sg_warp_specular_term(amplitude, direction, sharpness, normal,
                          roughness, view, spec_albedo):
    r"""Computes the specular reflectance from spherical gaussian lobes representing
    incoming radiance, using the Cook-Torrance microfacet specular shading model.

    Args:
        amplitude (torch.Tensor):
            The amplitudes of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.Tensor):
            The directions of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.Tensor):
            The sharpness of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg},)`.
        normal (torch.Tensor):
            The normal of the surface points where the specular reflectance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.
        roughness (torch.Tensor):
            The roughness of the surface points where the specular reflectance is to be estimated,
            of shape :math:`(\text{num_points},)`.
        view (torch.Tensor):
            The direction toward the camera from the surface points where
            the specular reflectance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.
        spec_albedo (torch.Tensor):
            The specular albedo (RGB color) of the surface points where the specular reflectance
            is to be estimated, of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.Tensor): The specular reflectance, of shape :math:`(\text{num_points}, 3)`.
    """
    # Bugfix: the original assert ended with ``amplitude.shape[-1]``, which is
    # truthy for any non-zero size; amplitudes are RGB so the last dim must be 3.
    assert amplitude.ndim == 2 and amplitude.shape[-1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert normal.ndim == 2 and normal.shape[-1] == 3
    assert roughness.shape == normal.shape[:1]
    assert view.shape == normal.shape
    assert spec_albedo.shape == normal.shape
    # Approximate the NDF as a spherical gaussian, then warp it to the
    # reflection direction of the current view.
    ndf_amplitude, ndf_direction, ndf_sharpness = sg_distribution_term(
        normal, roughness)
    ndf_amplitude, ndf_direction, ndf_sharpness = sg_warp_distribution(
        ndf_amplitude, ndf_direction, ndf_sharpness, view
    )
    ndl = torch.clamp(_dot(normal, ndf_direction), min=0., max=1.)
    ndv = torch.clamp(_dot(normal, view), min=0., max=1.)
    # Normalized half vector between the warped lobe axis and the view.
    h = ndf_direction + view
    h /= torch.sqrt(_dot(h, h))
    ldh = torch.clamp(_dot(ndf_direction, h), min=0., max=1.)
    # Convolve the warped NDF lobe with the incoming radiance lobes.
    output = unbatched_reduced_sg_inner_product(
        ndf_amplitude, ndf_direction, ndf_sharpness,
        amplitude, direction, sharpness)
    m2 = (roughness * roughness).unsqueeze(-1)
    output *= _ggx_v1(m2, ndl) * _ggx_v1(m2, ndv)  # Smith visibility terms
    output *= fresnel(ldh, spec_albedo)            # Schlick Fresnel
    output *= ndl                                  # cosine foreshortening
    return torch.clamp(output, min=0.)
4,623 | import math
import torch
from kaolin import _C
def sg_irradiance_fitted(amplitude, direction, sharpness, normal):
    r"""Computes an approximate incident irradiance from multiple spherical gaussians
    representing the incoming radiance.

    The result is broadcasted per point per spherical gaussian.

    .. note::
        The irradiance is computed using a fitted approximation polynomial,
        this approximation were provided by Stephen Hill.

    Args:
        amplitude (torch.Tensor):
            The amplitudes of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.Tensor):
            The directions of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.Tensor):
            The sharpness of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg},)`.
        normal (torch.Tensor):
            The normal of the surface points where the irradiance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.Tensor):
            The irradiance for each spherical gaussian for each surface point,
            of shape :math:`(\text{num_points}, \text{num_sg}, 3)`.
    """
    assert amplitude.ndim == 2 and amplitude.shape[-1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert normal.ndim == 2 and normal.shape[1] == 3
    # Cosine between each normal and each lobe axis: (num_points, num_sg).
    cos_nl = torch.einsum('ik,jk->ij', normal, direction)
    lam = sharpness.unsqueeze(0)
    # Constants of Stephen Hill's fitted polynomial.
    c0 = 0.36
    c1 = 1. / (4. * c0)
    exp_l = torch.exp(-lam)
    exp_2l = exp_l * exp_l
    inv_l = 1. / lam
    scale = 1. + 2. * exp_2l - inv_l
    bias = (exp_l - exp_2l) * inv_l - exp_2l
    x = torch.sqrt(1. - scale)
    x0 = c0 * cos_nl
    x1 = c1 * x
    n = x0 + x1
    # Smooth quadratic near the horizon, clamped cosine elsewhere.
    y = torch.where(abs(x0) <= x1,
                    n * n / x,
                    torch.clamp(cos_nl, min=0., max=1.))
    fitted = scale * y + bias
    return fitted.unsqueeze(-1) * \
        approximate_sg_integral(amplitude, sharpness).unsqueeze(0)
The provided code snippet includes necessary dependencies for implementing the `sg_diffuse_fitted` function. Write a Python function `def sg_diffuse_fitted(amplitude, direction, sharpness, normal, albedo)` to solve the following problem:
r"""Computes the outgoing radiance from multiple spherical gaussians representing incoming radiance, using a Lambertian diffuse BRDF. .. note:: The irradiance is computed using a fitted approximation polynomial, this approximation were provided by Stephen Hill. See :func:`sg_irradiance_fitted`. Args: amplitude (torch.Tensor): The amplitudes of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg}, 3)`. direction (torch.Tensor): The directions of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg}, 3)`. sharpness (torch.Tensor): The sharpness of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg},)`. normal (torch.Tensor): The normal of the surface points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. albedo (torch.Tensor): The albedo (RGB color) of the surface points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. Returns: (torch.Tensor): The diffuse radiance, of shape :math:`(\text{num_points}, 3)`.
Here is the function:
def sg_diffuse_fitted(amplitude, direction, sharpness, normal, albedo):
    r"""Computes the outgoing radiance from multiple spherical gaussians representing
    incoming radiance, using a Lambertian diffuse BRDF.

    .. note::
        The irradiance is computed using a fitted approximation polynomial,
        this approximation were provided by Stephen Hill. See :func:`sg_irradiance_fitted`.

    Args:
        amplitude (torch.Tensor):
            The amplitudes of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.Tensor):
            The directions of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.Tensor):
            The sharpness of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg},)`.
        normal (torch.Tensor):
            The normal of the surface points where the radiance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.
        albedo (torch.Tensor):
            The albedo (RGB color) of the surface points where the radiance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.Tensor): The diffuse radiance, of shape :math:`(\text{num_points}, 3)`.
    """
    assert amplitude.ndim == 2 and amplitude.shape[1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert normal.ndim == 2 and normal.shape[1] == 3
    assert albedo.shape == normal.shape
    # Mean irradiance over the lobes, clamped to be non-negative.
    irradiance = torch.clamp(
        sg_irradiance_fitted(amplitude, direction, sharpness, normal).mean(1),
        min=0.)
    # Lambertian BRDF is albedo / pi.
    return irradiance * (albedo / math.pi)
4,624 | import math
import torch
from kaolin import _C
def sg_irradiance_inner_product(amplitude, direction, sharpness, normal):
    r"""Computes the approximate incident irradiance from multiple spherical gaussians
    representing incoming radiance.

    The clamped cosine lobe is approximated as a spherical gaussian,
    and convolved with the incoming radiance lobe using a spherical gaussian
    inner product.

    Args:
        amplitude (torch.Tensor):
            The amplitudes of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.Tensor):
            The directions of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.Tensor):
            The sharpness of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg},)`.
        normal (torch.Tensor):
            The normal of the surface points where the radiance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.Tensor): The irradiance, of shape :math:`(\text{num_points}, 3)`.
    """
    assert amplitude.ndim == 2 and amplitude.shape[1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert normal.ndim == 2 and normal.shape[1] == 3
    # SG approximation of the clamped cosine lobe around each normal.
    cos_amp, cos_dir, cos_sharp = cosine_lobe_sg(normal)
    convolved = unbatched_reduced_sg_inner_product(
        cos_amp, cos_dir, cos_sharp,
        amplitude, direction, sharpness
    )
    return torch.clamp(convolved, min=0.)
The provided code snippet includes necessary dependencies for implementing the `sg_diffuse_inner_product` function. Write a Python function `def sg_diffuse_inner_product(amplitude, direction, sharpness, normal, albedo)` to solve the following problem:
r"""Computes the outgoing radiance from multiple spherical gaussians representing incoming radiance, using a Lambertian diffuse BRDF. This is the diffuse reflectance used in `DIB-R++\: Learning to Predict Lighting and Material with a Hybrid Differentiable Renderer`_ NeurIPS 2021. Args: amplitude (torch.Tensor): The amplitudes of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg}, 3)`. direction (torch.Tensor): The directions of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg}, 3)`. sharpness (torch.Tensor): The sharpness of the spherical gaussians representing the incoming radiance, of shape :math:`(\text{num_sg},)`. normal (torch.Tensor): The normal of the surface points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. albedo (torch.Tensor): The albedo of the surface points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. Returns: (torch.Tensor): The diffuse radiance, of shape :math:`(\text{num_points}, 3)`. .. _DIB-R++\: Learning to Predict Lighting and Material with a Hybrid Differentiable Renderer: https://nv-tlabs.github.io/DIBRPlus/
Here is the function:
def sg_diffuse_inner_product(amplitude, direction, sharpness, normal, albedo):
    r"""Computes the outgoing radiance from multiple spherical gaussians representing
    incoming radiance, using a Lambertian diffuse BRDF.

    This is the diffuse reflectance used in
    `DIB-R++\: Learning to Predict Lighting and Material with a Hybrid Differentiable Renderer`_
    NeurIPS 2021.

    Args:
        amplitude (torch.Tensor):
            The amplitudes of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        direction (torch.Tensor):
            The directions of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg}, 3)`.
        sharpness (torch.Tensor):
            The sharpness of the spherical gaussians representing the incoming radiance,
            of shape :math:`(\text{num_sg},)`.
        normal (torch.Tensor):
            The normal of the surface points where the radiance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.
        albedo (torch.Tensor):
            The albedo of the surface points where the radiance is to be estimated,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.Tensor): The diffuse radiance, of shape :math:`(\text{num_points}, 3)`.

    .. _DIB-R++\: Learning to Predict Lighting and Material with a Hybrid Differentiable Renderer:
        https://nv-tlabs.github.io/DIBRPlus/
    """
    assert amplitude.ndim == 2 and amplitude.shape[1] == 3
    assert direction.shape == amplitude.shape
    assert sharpness.shape == amplitude.shape[:1]
    assert normal.ndim == 2 and normal.shape[1] == 3
    assert albedo.shape == normal.shape
    irradiance = sg_irradiance_inner_product(
        amplitude, direction, sharpness, normal)
    # Lambertian BRDF is albedo / pi.
    return irradiance * (albedo / math.pi)
4,625 | import math
import torch
def project_onto_sh9(directions):
    r"""Project directions, represented as cartesian coordinates,
    onto the spherical harmonic coefficients of degree 3.

    Args:
        directions (torch.Tensor or list of int):
            The directions as cartesian coordinates,
            of any shape but of last dimension 3.

    Returns:
        (torch.Tensor): The spherical harmonics coefficients,
            of shape ``direction.shape[:-1]`` and last dimension 9.

    Raises:
        TypeError: If ``directions`` is neither a list nor a torch.Tensor.
    """
    # Band 0
    if isinstance(directions, torch.Tensor):
        assert directions.shape[-1] == 3
        x, y, z = torch.split(directions, 1, dim=-1)
        band0 = torch.full_like(x, 0.28209479177)
    elif isinstance(directions, list):
        assert len(directions) == 3
        x, y, z = directions
        band0 = 0.28209479177
    else:
        # Bugfix: this branch referenced the undefined name ``direction``,
        # raising NameError instead of the intended TypeError.
        raise TypeError(f"direction is a {type(directions)}, "
                        "must be a list or a torch.Tensor")
    # Band 1
    band1_m1 = -0.4886025119 * y
    band1_0 = 0.4886025119 * z
    band1_p1 = -0.4886025119 * x
    # Band 2
    band2_m2 = 1.0925484305920792 * (x * y)
    band2_m1 = -1.0925484305920792 * (y * z)
    band2_0 = 0.94617469575 * (z * z) - 0.31539156525
    band2_p1 = -1.0925484305920792 * x * z
    band2_p2 = 0.5462742152960396 * (x * x - y * y)
    if isinstance(directions, torch.Tensor):
        # Tensor inputs keep their leading shape; coefficients go last.
        return torch.cat([
            band0,
            band1_m1, band1_0, band1_p1,
            band2_m2, band2_m1, band2_0, band2_p1, band2_p2
        ], dim=-1)
    else:
        return torch.tensor([
            band0,
            band1_m1, band1_0, band1_p1,
            band2_m2, band2_m1, band2_0, band2_p1, band2_p2
        ])
def sh9_irradiance(lights, normals):
    r"""Compute approximate incident irradiance from a single spherical harmonic
    lobe of degree 3 representing incoming radiance.

    The clamped cosine lobe is approximated as spherical harmonics.

    Args:
        lights (torch.Tensor): Light parameters of each spherical harmonic
            (see: :func:`project_onto_sh9`), of 1D size :math:`(9,)`.
        normals (torch.Tensor): Normal of the points where the irradiance is to be
            estimated, of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.Tensor): The irradiance values, of 1D shape :math:`(\text{num_points},)`.
    """
    assert lights.shape == (9,)
    assert normals.ndim == 2 and normals.shape[-1] == 3
    basis = project_onto_sh9(normals)
    # Per-band scaling from the clamped-cosine convolution kernel.
    basis[..., 0] *= math.pi
    basis[..., 1:4] *= 2. * math.pi / 3.
    basis[..., 4:] *= math.pi / 4.
    weighted = basis * lights.unsqueeze(-2)
    return weighted.sum(dim=-1).reshape(*normals.shape[:-1])
The provided code snippet includes necessary dependencies for implementing the `sh9_diffuse` function. Write a Python function `def sh9_diffuse(directions, normals, albedo)` to solve the following problem:
r"""Compute the outgoing radiance from a single spherical harmonic lobe of degree 3 representing incoming radiance, using a Lambertian diffuse BRDF. Args: directions (torch.Tensor): Light directions, of 1D size :math:`(3,)`. normals (torch.Tensor): Normal of the points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. albedo (torch.Tensor): albedo (RGB color) of the points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. Returns: (torch.Tensor): The diffuse radiance, of same shape than ``albedo``.
Here is the function:
def sh9_diffuse(directions, normals, albedo):
r"""Compute the outgoing radiance from a single spherical harmonic lobe of degree 3
representing incoming radiance, using a Lambertian diffuse BRDF.
Args:
directions (torch.Tensor): Light directions, of 1D size :math:`(3,)`.
normals (torch.Tensor): Normal of the points where the radiance is to be estimated,
of shape :math:`(\text{num_points}, 3)`.
albedo (torch.Tensor): albedo (RGB color) of the points where the radiance is to be estimated,
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.Tensor): The diffuse radiance, of same shape than ``albedo``.
"""
assert directions.shape == (3,)
assert normals.ndim == 2 and normals.shape[1] == 3
assert normals.shape == albedo.shape
lights = project_onto_sh9(directions)
irradiance = sh9_irradiance(lights, normals)
return albedo * irradiance.unsqueeze(-1) | r"""Compute the outgoing radiance from a single spherical harmonic lobe of degree 3 representing incoming radiance, using a Lambertian diffuse BRDF. Args: directions (torch.Tensor): Light directions, of 1D size :math:`(3,)`. normals (torch.Tensor): Normal of the points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. albedo (torch.Tensor): albedo (RGB color) of the points where the radiance is to be estimated, of shape :math:`(\text{num_points}, 3)`. Returns: (torch.Tensor): The diffuse radiance, of same shape than ``albedo``. |
4,626 | from __future__ import division
import torch
from .. import camera
from ... import ops
The provided code snippet includes necessary dependencies for implementing the `texture_mapping` function. Write a Python function `def texture_mapping(texture_coordinates, texture_maps, mode='nearest')` to solve the following problem:
r"""Interpolates texture_maps by dense or sparse texture_coordinates. This function supports sampling texture coordinates for: 1. An entire 2D image 2. A sparse point cloud of texture coordinates. Args: texture_coordinates(torch.FloatTensor): dense image texture coordinate, of shape :math:`(\text{batch_size}, h, w, 2)` or sparse texture coordinate for points, of shape :math:`(\text{batch_size}, \text{num_points}, 2)` Coordinates are expected to be normalized between [0, 1]. Note that opengl tex coord is different from pytorch's coord. opengl coord ranges from 0 to 1, y axis is from bottom to top and it supports circular mode(-0.1 is the same as 0.9) pytorch coord ranges from -1 to 1, y axis is from top to bottom and does not support circular filtering is the same as the mode parameter for torch.nn.functional.grid_sample. texture_maps(torch.FloatTensor): textures of shape :math:`(\text{batch_size}, \text{num_channels}, h', w')`. Here, :math:`h'` & :math:`w'` are the height and width of texture maps. If ``texture_coordinates`` are image texture coordinates - For each pixel in the rendered image of height we use the coordinates in texture_coordinates to query corresponding value in texture maps. Note that height :math:`h` and width :math:`w` of the rendered image could be different from :math:`h'` & :math:`w'`. If ``texture_coordinates`` are sparse texture coordinates - For each point in ``texture_coordinates`` we query the corresponding value in ``texture_maps``. Returns: (torch.FloatTensor): interpolated texture of shape :math:`(\text{batch_size}, h, w, \text{num_channels})` or interpolated texture of shape :math:`(\text{batch_size}, \text{num_points}, \text{num_channels})`
Here is the function:
def texture_mapping(texture_coordinates, texture_maps, mode='nearest'):
    r"""Sample values from ``texture_maps`` at the given ``texture_coordinates``.

    Two layouts of coordinates are supported:

    1. A dense image grid of coordinates.
    2. A sparse point cloud of coordinates.

    Args:
        texture_coordinates(torch.FloatTensor):
            Either dense image texture coordinates,
            of shape :math:`(\text{batch_size}, h, w, 2)`,
            or sparse per-point texture coordinates,
            of shape :math:`(\text{batch_size}, \text{num_points}, 2)`.
            Coordinates are expected to be normalized between [0, 1],
            following the OpenGL convention (y axis from bottom to top).
            Out-of-range coordinates are clamped rather than wrapped.
        texture_maps(torch.FloatTensor):
            Textures of shape :math:`(\text{batch_size}, \text{num_channels}, h', w')`.
            The texture resolution :math:`(h', w')` may differ from the
            coordinate grid resolution :math:`(h, w)`.
        mode (str):
            Filtering mode, forwarded to :func:`torch.nn.functional.grid_sample`.
            Default: 'nearest'.

    Returns:
        (torch.FloatTensor):
            Interpolated texture of shape
            :math:`(\text{batch_size}, h, w, \text{num_channels})` or
            :math:`(\text{batch_size}, \text{num_points}, \text{num_channels})`.
    """
    batch_size = texture_coordinates.shape[0]
    num_channels = texture_maps.shape[1]
    # Flatten to the (batch, num_samples, 1, 2) layout grid_sample expects.
    uv = texture_coordinates.reshape(batch_size, -1, 1, 2)
    # OpenGL coordinates may fall outside [0, 1]; clamp, then remap to
    # pytorch's [-1, 1] sampling range.
    uv = torch.clamp(uv, 0., 1.) * 2. - 1.
    # pytorch's y axis runs top to bottom, OpenGL's bottom to top: flip y.
    grid = torch.stack((uv[..., 0], -uv[..., 1]), dim=-1)
    sampled = torch.nn.functional.grid_sample(texture_maps,
                                              grid,
                                              mode=mode,
                                              align_corners=False,
                                              padding_mode='border')
    sampled = sampled.permute(0, 2, 3, 1)
    return sampled.reshape(batch_size, *texture_coordinates.shape[1:-1], num_channels)
4,627 | from __future__ import division
import torch
from .. import camera
from ... import ops
The provided code snippet includes necessary dependencies for implementing the `spherical_harmonic_lighting` function. Write a Python function `def spherical_harmonic_lighting(imnormal, lights)` to solve the following problem:
r"""Creates lighting effects. Follows convention set by *Wojciech Jarosz* in `Efficient Monte Carlo Methods for Light Transport in Scattering Media`_. .. deprecated:: 0.13.0 This function is deprecated. Use :func:`kaolin.render.lighting.sh9_irradiance`. Args: imnormal (torch.FloatTensor): per pixel normal, of shape :math:`(\text{batch_size}, \text{height}, \text{width}, 3)` lights (torch.FloatTensor): spherical harmonic lighting parameters, of shape :math:`(\text{batch_size}, 9)` Returns: (torch.FloatTensor): lighting effect, shape of :math:`(\text{batch_size}, \text{height}, \text{width})` .. _Efficient Monte Carlo Methods for Light Transport in Scattering Media: https://cs.dartmouth.edu/~wjarosz/publications/dissertation/appendixB.pdf
Here is the function:
def spherical_harmonic_lighting(imnormal, lights):
    r"""Creates lighting effects.

    Follows convention set by *Wojciech Jarosz* in
    `Efficient Monte Carlo Methods for Light Transport in Scattering Media`_.

    .. deprecated:: 0.13.0
       This function is deprecated. Use :func:`kaolin.render.lighting.sh9_irradiance`.

    Args:
        imnormal (torch.FloatTensor):
            per pixel normal, of shape :math:`(\text{batch_size}, \text{height}, \text{width}, 3)`
        lights (torch.FloatTensor):
            spherical harmonic lighting parameters, of shape :math:`(\text{batch_size}, 9)`

    Returns:
        (torch.FloatTensor):
            lighting effect, shape of :math:`(\text{batch_size}, \text{height}, \text{width})`

    .. _Efficient Monte Carlo Methods for Light Transport in Scattering Media:
        https://cs.dartmouth.edu/~wjarosz/publications/dissertation/appendixB.pdf
    """
    # Per-pixel normal components.
    x = imnormal[..., 0]
    y = imnormal[..., 1]
    z = imnormal[..., 2]
    # Evaluate the 9 real spherical harmonic basis functions (degrees 0-2).
    bands = torch.stack([
        0.28209479177 * torch.ones_like(x),       # l=0
        0.4886025119 * x,                         # l=1, m=-1
        0.4886025119 * z,                         # l=1, m=0
        0.4886025119 * y,                         # l=1, m=+1
        1.09254843059 * (x * y),                  # l=2, m=-2
        1.09254843059 * (y * z),                  # l=2, m=-1
        0.94617469575 * (z * z) - 0.31539156525,  # l=2, m=0
        0.77254840404 * (x * z),                  # l=2, m=+1
        0.38627420202 * (x * x - y * y),          # l=2, m=+2
    ], dim=3)
    # Weight each band by its lighting coefficient and sum.
    return torch.sum(bands * lights.view(-1, 1, 1, 9), dim=3)
4,628 | from __future__ import division
import torch
from .. import camera
from ... import ops
from . import camera
from . import ops
The provided code snippet includes necessary dependencies for implementing the `prepare_vertices` function. Write a Python function `def prepare_vertices(vertices, faces, camera_proj, camera_rot=None, camera_trans=None, camera_transform=None)` to solve the following problem:
r"""Wrapper function to move and project vertices to cameras then index them with faces. Args: vertices (torch.Tensor): the meshes vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.LongTensor): the meshes faces, of shape :math:`(\text{num_faces}, \text{face_size})`. camera_proj (torch.Tensor): the camera projection vector, of shape :math:`(3, 1)`. camera_rot (torch.Tensor, optional): the camera rotation matrices, of shape :math:`(\text{batch_size}, 3, 3)`. camera_trans (torch.Tensor, optional): the camera translation vectors, of shape :math:`(\text{batch_size}, 3)`. camera_transform (torch.Tensor, optional): the camera transformation matrices, of shape :math:`(\text{batch_size}, 4, 3)`. Replace `camera_trans` and `camera_rot`. Returns: (torch.Tensor, torch.Tensor, torch.Tensor): The vertices in camera coordinate indexed by faces, of shape :math:`(\text{batch_size}, \text{num_faces}, \text{face_size}, 3)`. The vertices in camera plan coordinate indexed by faces, of shape :math:`(\text{batch_size}, \text{num_faces}, \text{face_size}, 2)`. The face normals, of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`.
Here is the function:
def prepare_vertices(vertices, faces, camera_proj, camera_rot=None, camera_trans=None,
                     camera_transform=None):
    r"""Wrapper function to move and project vertices to cameras then index them with faces.

    Args:
        vertices (torch.Tensor):
            the meshes vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.LongTensor):
            the meshes faces, of shape :math:`(\text{num_faces}, \text{face_size})`.
        camera_proj (torch.Tensor):
            the camera projection vector, of shape :math:`(3, 1)`.
        camera_rot (torch.Tensor, optional):
            the camera rotation matrices,
            of shape :math:`(\text{batch_size}, 3, 3)`.
        camera_trans (torch.Tensor, optional):
            the camera translation vectors,
            of shape :math:`(\text{batch_size}, 3)`.
        camera_transform (torch.Tensor, optional):
            the camera transformation matrices,
            of shape :math:`(\text{batch_size}, 4, 3)`.
            Replace `camera_trans` and `camera_rot`.

    Returns:
        (torch.Tensor, torch.Tensor, torch.Tensor):
            The vertices in camera coordinate indexed by faces,
            of shape :math:`(\text{batch_size}, \text{num_faces}, \text{face_size}, 3)`.
            The vertices in camera plan coordinate indexed by faces,
            of shape :math:`(\text{batch_size}, \text{num_faces}, \text{face_size}, 2)`.
            The face normals, of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`.
    """
    # Two mutually exclusive ways to specify the camera pose:
    # (camera_rot, camera_trans) or a combined camera_transform matrix.
    if camera_transform is None:
        assert camera_trans is not None and camera_rot is not None, \
            "camera_transform or camera_trans and camera_rot must be defined"
        vertices_camera = camera.rotate_translate_points(vertices, camera_rot,
                                                         camera_trans)
    else:
        assert camera_trans is None and camera_rot is None, \
            "camera_trans and camera_rot must be None when camera_transform is defined"
        # Append a homogeneous coordinate so the 4x3 transform applies
        # rotation and translation in a single matmul.
        homogeneous = torch.nn.functional.pad(
            vertices, (0, 1), mode='constant', value=1.
        )
        vertices_camera = homogeneous @ camera_transform
    # Project onto the image plane, then gather per-face vertex attributes.
    vertices_image = camera.perspective_camera(vertices_camera, camera_proj)
    face_vertices_camera = ops.mesh.index_vertices_by_faces(vertices_camera, faces)
    face_vertices_image = ops.mesh.index_vertices_by_faces(vertices_image, faces)
    face_normals = ops.mesh.face_normals(face_vertices_camera, unit=True)
    return face_vertices_camera, face_vertices_image, face_normals
4,629 | import torch
from torch.autograd import Function
from kaolin import _C
from .rasterization import rasterize, _legacy_to_opengl, nvdiff, _get_nvdiff_glctx
def dibr_soft_mask(face_vertices_image, selected_face_idx,
                   sigmainv=7000, boxlen=0.02, knum=30, multiplier=1000.):
    r"""Compute a soft mask generally used with :func:`kaolin.metrics.render.mask_iou`
    to compute a silhouette loss, as defined by *Chen, Wenzheng, et al.* in
    `Learning to Predict 3D Objects with an Interpolation-based Differentiable Renderer`_ Neurip 2019.

    Args:
        face_vertices_image (torch.Tensor):
            2D positions of the face vertices on image plane,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`,
            Note that ``face_vertices_camera`` is projected on image plane (z=-1)
            and forms ``face_vertices_image``.
            The coordinates of face_vertices_image are between :math:`[-1, 1]`,
            which corresponds to normalized image pixels.
        selected_face_idx (torch.LongTensor):
            Rendered face index,
            of shape :math:`(\text{batch_size}, \text{height}, \text{width})`.
            See 2nd returned value from :func:`kaolin.render.mesh.rasterize`.
        sigmainv (float):
            Smoothness term for computing the softmask, the higher the sharper.
            The recommended range is :math:`[1/3e-4, 1/3e-5]`. Default: 7000.
        boxlen (float):
            Margin over bounding box of faces which will threshold which pixels
            will be influenced by the face. The value should be adapted to sigmainv,
            to threshold values close to 0. The recommended range is [0.05, 0.2].
            Default: 0.02.
        knum (int):
            Maximum number of faces that can influence one pixel.
            The value should be adapted to boxlen, to avoid missing faces.
            The recommended range is [20, 100]. Default: 30.
        multiplier (float):
            To avoid numerical issue,
            we internally enlarge the 2d coordinates by a multiplier.
            Default: 1000.

    Returns:
        (torch.FloatTensor):
            The soft mask, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`.

    .. _Learning to Predict 3D Objects with an Interpolation-based Differentiable Renderer:
        https://arxiv.org/abs/1908.01210
    """
    # All the heavy lifting happens in the custom CUDA autograd Function.
    return DibrSoftMaskCuda.apply(
        face_vertices_image,
        selected_face_idx,
        sigmainv,
        boxlen,
        knum,
        multiplier
    )
def rasterize(height,
              width,
              face_vertices_z,
              face_vertices_image,
              face_features,
              valid_faces=None,
              multiplier=None,
              eps=None,
              backend='cuda'):
    r"""Fully differentiable rasterization implementation,
    that renders 3D triangle meshes with per-vertex per-face features to
    generalized feature "images".

    Backend can be selected among, `nvdiffrast library`_ if available (see `installation instructions`_),
    or custom cuda ops improved from originally proposed by *Chen, Whenzheng, et al.* in
    `Learning to Predict 3D Objects with an Interpolation-based Differentiable Renderer`_ NeurIPS 2019.

    .. note::
        `nvdiffrast library`_ is relying on OpenGL and so can be faster especially
        on larger mesh and resolution.

    Args:
        height (int): the size of rendered images.
        width (int): the size of rendered images.
        face_vertices_z (torch.FloatTensor):
            3D points depth (z) value of the face vertices in camera coordinate,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`.
        face_vertices_image (torch.FloatTensor):
            2D positions of the face vertices on image plane,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`,
            Note that ``face_vertices_camera`` is projected on image plane (z=-1)
            and forms ``face_vertices_image``.
            The coordinates of face_vertices_image are between :math:`[-1, 1]`,
            which corresponds to normalized image pixels.
        face_features (torch.FloatTensor or list of torch.FloatTensor):
            Features (per-vertex per-face) to be drawn,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`,
            feature is the features dimension,
            for instance with vertex colors num_features=3 (R, G, B),
            and texture coordinates num_features=2 (X, Y),
            or a list of num_features,
            of shapes :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim[i]})`
        valid_faces (torch.BoolTensor):
            Mask of faces being rasterized,
            of shape :math:`(\text{batch_size}, \text{num_faces})`.
            Default: All faces are valid.
        multiplier (int):
            To avoid numeric issue, we enlarge the coordinates by a multiplier.
            Used only with ``backend`` 'cuda' at forward pass. Default: 1000.
        eps (float):
            Epsilon value used to normalize barycentric weights.
            Especially matter with small triangles,
            to increase or decrease in case of exploding or vanishing gradient.
            Ignored if ``backend`` is 'nvdiffrast'.
            Default: 1e-8.
        backend (string):
            Backend used for the rasterization, can be ['cuda', 'nvdiffrast', nvdiffrast_fwd'].
            'nvdiffrast_fwd' is using `nvdiffrast library`_ for the forward pass only
            and kaolin's custom Op for backward pass.

    Returns:
        (torch.FloatTensor, torch.LongTensor):

        - The rendered features of shape
          :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features})`,
          if `face_features` is a list of torch.FloatTensor, return a tuple of torch.FloatTensor,
          of shapes :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features[i]})`.
        - The rendered face index, -1 is None,
          of shape :math:`(\text{batch_size}, \text{height}, \text{width})`.

    .. _Learning to Predict 3D Objects with an Interpolation-based Differentiable Renderer:
        https://arxiv.org/abs/1908.01210

    .. _nvdiffrast library:
        https://github.com/NVlabs/nvdiffrast

    .. _installation instructions:
        https://nvlabs.github.io/nvdiffrast/#installation
    """
    if multiplier is None:
        multiplier = 1000
    elif backend in ['nvdiffrast', 'nvdiffrast_fwd']:
        warnings.warn(f'in "rasterize": multiplier is ignored with backend "{backend}"',
                      UserWarning)
    if eps is None:
        eps = 1e-8
    elif backend == 'nvdiffrast':
        warnings.warn(f'in "rasterize": eps is ignored with backend "{backend}"',
                      UserWarning)
    batch_size, num_faces, _ = face_vertices_z.shape
    # Multiple feature tensors are rasterized in a single pass by
    # concatenating on the feature dimension; they are split back below.
    _face_features = torch.cat(face_features, dim=-1) \
        if isinstance(face_features, (list, tuple)) else face_features
    if backend == 'nvdiffrast':
        image_features, face_idx = _nvdiff_rasterize(
            height, width, face_vertices_z, face_vertices_image,
            _face_features, valid_faces)
    elif backend == 'nvdiffrast_fwd':
        image_features, face_idx = NvdiffRasterizeFwdCudaBwd.apply(
            height, width, face_vertices_z, face_vertices_image,
            _face_features, valid_faces, eps)
    elif backend == 'cuda':
        image_features, face_idx = RasterizeCuda.apply(
            height, width, face_vertices_z, face_vertices_image,
            _face_features, valid_faces, multiplier, eps)
    else:
        # Single message string: the original passed two arguments to
        # ValueError, which made the error text render as a tuple.
        raise ValueError(f'"{backend}" is not a valid backend, '
                         'valid choices are ["cuda", "nvdiffrast", "nvdiffrast_fwd"]')
    if isinstance(face_features, (list, tuple)):
        # Split the concatenated output back into per-input feature tensors.
        _image_features = []
        cur_idx = 0
        for face_feature in face_features:
            _image_features.append(image_features[..., cur_idx:cur_idx + face_feature.shape[-1]])
            cur_idx += face_feature.shape[-1]
        image_features = tuple(_image_features)
    return image_features, face_idx
The provided code snippet includes necessary dependencies for implementing the `dibr_rasterization` function. Write a Python function `def dibr_rasterization(height, width, face_vertices_z, face_vertices_image, face_features, face_normals_z, sigmainv=7000, boxlen=0.02, knum=30, multiplier=None, eps=None, rast_backend='cuda')` to solve the following problem:
r"""Fully differentiable DIB-R renderer implementation, that renders 3D triangle meshes with per-vertex per-face features to generalized feature "images", soft foreground masks, and face index maps. Args: height (int): the size of rendered images. width (int): the size of rendered images. face_vertices_z (torch.FloatTensor): 3D points depth (z) value of the face vertices in camera coordinate, of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`. face_vertices_image (torch.FloatTensor): 2D positions of the face vertices on image plane, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`, Note that ``face_vertices_camera`` is projected on image plane (z=-1) and forms ``face_vertices_image``. The coordinates of face_vertices_image are between :math:`[-1, 1]`, which corresponds to normalized image pixels. face_features (torch.FloatTensor or list of torch.FloatTensor): Features (per-vertex per-face) to be drawn, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`, feature is the features dimension, for instance with vertex colors num_features=3 (R, G, B), and texture coordinates num_features=2 (X, Y), or a list of num_features, of shapes :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim[i]})` face_normals_z (torch.FloatTensor): Normal directions in z axis, of shape :math:`(\text{batch_size}, \text{num_faces})`, only faces with normal z >= 0 will be drawn. sigmainv (float): Smoothness term for computing the softmask, the higher the sharper. The recommended range is :math:`[1/3e-4, 1/3e-5]`. Defaut: 7000. boxlen (float): Margin over bounding box of faces which will threshold which pixels will be influenced by the face. The value should be adapted to sigmainv, to threshold values close to 0. The recommended range is [0.05, 0.2]. Default: 0.02. knum (int): Maximum number of faces that can influence one pixel. The value should be adapted to boxlen, to avoid missing faces. The recommended range is [20, 100]. Default: 30. 
multiplier (float): To avoid numerical issue, we internally enlarge the 2d coordinates by a multiplier. Default: 1000. eps (float): Epsilon value used to normalize barycentric weights in rasterization. Especially matter with small triangles, to increase or decrease in case of exploding or vanishing gradient. Ignored if ``backend`` is 'nvdiffrast'. Default: 1e-8. backend (string): Backend used for the rasterization, can be ['cuda', 'nvdiffrast', nvdiffrast_fwd']. 'nvdiffrast_fwd' is using `nvdiffrast library` for the forward pass only and kaolin's custom Op for backward pass. Returns: (torch.Tensor, torch.Tensor, torch.LongTensor): - The rendered features of shape :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features})`, if `face_features` is a list of torch.FloatTensor, return of torch.FloatTensor, of shapes :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features[i]})`. - The rendered soft mask, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`. It is generally used with :func:`kaolin.metrics.render.mask_iou` to compute the silhouette loss. - The rendered face index, -1 is None, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`.
Here is the function:
def dibr_rasterization(height, width, face_vertices_z, face_vertices_image,
                       face_features, face_normals_z, sigmainv=7000,
                       boxlen=0.02, knum=30, multiplier=None, eps=None,
                       rast_backend='cuda'):
    r"""Fully differentiable DIB-R renderer implementation,
    that renders 3D triangle meshes with per-vertex per-face features to
    generalized feature "images", soft foreground masks, and face index maps.

    Args:
        height (int): the size of rendered images.
        width (int): the size of rendered images.
        face_vertices_z (torch.FloatTensor):
            3D points depth (z) value of the face vertices in camera coordinate,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`.
        face_vertices_image (torch.FloatTensor):
            2D positions of the face vertices on image plane,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`,
            Note that ``face_vertices_camera`` is projected on image plane (z=-1)
            and forms ``face_vertices_image``.
            The coordinates of face_vertices_image are between :math:`[-1, 1]`,
            which corresponds to normalized image pixels.
        face_features (torch.FloatTensor or list of torch.FloatTensor):
            Features (per-vertex per-face) to be drawn,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`,
            feature is the features dimension,
            for instance with vertex colors num_features=3 (R, G, B),
            and texture coordinates num_features=2 (X, Y),
            or a list of num_features,
            of shapes :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim[i]})`
        face_normals_z (torch.FloatTensor):
            Normal directions in z axis, of shape :math:`(\text{batch_size}, \text{num_faces})`,
            only faces with normal z >= 0 will be drawn.
        sigmainv (float):
            Smoothness term for computing the softmask, the higher the sharper.
            The recommended range is :math:`[1/3e-4, 1/3e-5]`. Default: 7000.
        boxlen (float):
            Margin over bounding box of faces which will threshold which pixels
            will be influenced by the face. The value should be adapted to sigmainv,
            to threshold values close to 0. The recommended range is [0.05, 0.2].
            Default: 0.02.
        knum (int):
            Maximum number of faces that can influence one pixel.
            The value should be adapted to boxlen, to avoid missing faces.
            The recommended range is [20, 100]. Default: 30.
        multiplier (float):
            To avoid numerical issue,
            we internally enlarge the 2d coordinates by a multiplier.
            Default: 1000.
        eps (float):
            Epsilon value used to normalize barycentric weights in rasterization.
            Especially matter with small triangles,
            to increase or decrease in case of exploding or vanishing gradient.
            Ignored if ``backend`` is 'nvdiffrast'.
            Default: 1e-8.
        rast_backend (string):
            Backend used for the rasterization, can be ['cuda', 'nvdiffrast', nvdiffrast_fwd'].
            'nvdiffrast_fwd' is using `nvdiffrast library` for the forward pass only
            and kaolin's custom Op for backward pass.

    Returns:
        (torch.Tensor, torch.Tensor, torch.LongTensor):

        - The rendered features of shape
          :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features})`,
          if `face_features` is a list of torch.FloatTensor, return a tuple of torch.FloatTensor,
          of shapes :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features[i]})`.
        - The rendered soft mask, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`.
          It is generally used with :func:`kaolin.metrics.render.mask_iou` to compute the silhouette loss.
        - The rendered face index, -1 is None,
          of shape :math:`(\text{batch_size}, \text{height}, \text{width})`.
    """
    # Only front-facing triangles (normal z >= 0) participate in rasterization.
    front_facing = face_normals_z >= 0.
    interpolated_features, face_idx = rasterize(
        height, width, face_vertices_z, face_vertices_image,
        face_features, front_facing, multiplier, eps, rast_backend)
    # The soft-mask Op takes no default multiplier, so mirror rasterize's
    # default of 1000 when the caller did not provide one.
    soft_mask = dibr_soft_mask(
        face_vertices_image, face_idx, sigmainv, boxlen, knum,
        1000. if multiplier is None else multiplier)
    return interpolated_features, soft_mask, face_idx
The value should be adapted to boxlen, to avoid missing faces. The recommended range is [20, 100]. Default: 30. multiplier (float): To avoid numerical issue, we internally enlarge the 2d coordinates by a multiplier. Default: 1000. eps (float): Epsilon value used to normalize barycentric weights in rasterization. Especially matter with small triangles, to increase or decrease in case of exploding or vanishing gradient. Ignored if ``backend`` is 'nvdiffrast'. Default: 1e-8. backend (string): Backend used for the rasterization, can be ['cuda', 'nvdiffrast', nvdiffrast_fwd']. 'nvdiffrast_fwd' is using `nvdiffrast library` for the forward pass only and kaolin's custom Op for backward pass. Returns: (torch.Tensor, torch.Tensor, torch.LongTensor): - The rendered features of shape :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features})`, if `face_features` is a list of torch.FloatTensor, return of torch.FloatTensor, of shapes :math:`(\text{batch_size}, \text{height}, \text{width}, \text{num_features[i]})`. - The rendered soft mask, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`. It is generally used with :func:`kaolin.metrics.render.mask_iou` to compute the silhouette loss. - The rendered face index, -1 is None, of shape :math:`(\text{batch_size}, \text{height}, \text{width})`. |
4,630 | import torch
from torch.autograd import Function
from kaolin import _C
def _base_naive_deftet_render(
pixel_coords, # (2,)
render_range, # (2,)
face_vertices_image, # (num_faces, 3, 2)
face_vertices_z, # (num_faces, 3)
face_vertices_min, # (num_faces, 2)
face_vertices_max, # (num_faces, 2)
valid_faces, # (num_faces)
eps): # int
"""Base function for :func:`_naive_deftet_sparse_render`
non-batched and for a single pixel
This is because most operations are vectorized on faces
but then only few outputs of those vectorized operations
are used (so the memory is only used temporarily).
"""
in_bbox_mask = torch.logical_and(
pixel_coords.unsqueeze(0) >= face_vertices_min,
pixel_coords.unsqueeze(0) < face_vertices_max)
in_bbox_mask = torch.logical_and(in_bbox_mask[:, 0],
in_bbox_mask[:, 1])
in_bbox_mask = torch.logical_and(in_bbox_mask,
valid_faces)
in_bbox_idx = torch.where(in_bbox_mask)[0]
#ax = ax[in_bbox_idx]
#ay = ay[in_bbox_idx]
#m = m[in_bbox_idx]
#p = p[in_bbox_idx]
#n = n[in_bbox_idx]
#q = q[in_bbox_idx]
#k3 = k3[in_bbox_idx]
#s = pixel_coords[0] - ax
#t = pixel_coords[1] - ay
#k1 = s * q - n * t
#k2 = m * t - s * p
#w1 = k1 / (k3 + NORM_EPS)
#w2 = k2 / (k3 + NORM_EPS)
#w0 = 1. - w1 - w2
face_vertices_image = face_vertices_image[in_bbox_idx]
ax = face_vertices_image[:, 0, 0]
ay = face_vertices_image[:, 0, 1]
bx = face_vertices_image[:, 1, 0]
by = face_vertices_image[:, 1, 1]
cx = face_vertices_image[:, 2, 0]
cy = face_vertices_image[:, 2, 1]
a_edge_x = ax - pixel_coords[0];
a_edge_y = ay - pixel_coords[1];
b_edge_x = bx - pixel_coords[0];
b_edge_y = by - pixel_coords[1];
c_edge_x = cx - pixel_coords[0];
c_edge_y = cy - pixel_coords[1];
_w0 = b_edge_x * c_edge_y - b_edge_y * c_edge_x;
_w1 = c_edge_x * a_edge_y - c_edge_y * a_edge_x;
_w2 = a_edge_x * b_edge_y - a_edge_y * b_edge_x;
norm = _w0 + _w1 + _w2;
norm_eps = eps * torch.sign(norm);
w0 = _w0 / (norm + norm_eps);
w1 = _w1 / (norm + norm_eps);
w2 = _w2 / (norm + norm_eps);
selected_mask = (w0 >= 0.) & (w1 >= 0.) & (w2 >= 0.)
selected_face_vertices_z = face_vertices_z[in_bbox_idx][selected_mask]
selected_weights = torch.stack([
w0[selected_mask], w1[selected_mask], w2[selected_mask]], dim=-1)
pixel_depth = torch.sum(selected_weights * face_vertices_z[in_bbox_idx][selected_mask],
dim=-1)
in_render_range_mask = torch.logical_and(
pixel_depth > render_range[0],
pixel_depth < render_range[1]
)
order = torch.argsort(pixel_depth[in_render_range_mask],
descending=True, dim=0)
return in_bbox_idx[selected_mask][in_render_range_mask][order]
The provided code snippet includes necessary dependencies for implementing the `_naive_deftet_sparse_render` function. Write a Python function `def _naive_deftet_sparse_render(pixel_coords, render_ranges, face_vertices_z, face_vertices_image, face_features, knum, valid_faces=None, eps=1e-8)` to solve the following problem:
r"""Naive implementation of :func:`deftet_sparse_render`. Note: The behavior is different than :func:`deftet_sparse_render` when knum < max(number of faces per pixel), as this returns the first faces by pixel_depth while deftet_render returns the first faces by the mesh order. Note: if `face_camera_vertices` and `face_camera_z` are produced by camera functions in :mod:`kaolin.render.camera`, then the expected range of values for `pixel_coords` is [-1., 1.] and the expected of range values for `render_range` is [-inf, 0.]. Args: pixel_coords (torch.Tensor): Image coordinates to render, of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`. render_ranges (torch.Tensor): Range of rendering, of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`. face_vertices_z (torch.Tensor): 3D points values of the face vertices in camera coordinate, values in front of camera are expected to be negative, higher values being closer to the camera. of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`. face_vertices_image (torch.Tensor): 2D positions of the face vertices on image plane, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`, Note that face vertices are projected on image plane (z=-1) to forms face_vertices_image. face_features (torch.Tensor or list of torch.Tensor): Features (per-vertex per-face) to be drawn, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`, feature is the features dimension, for instance with vertex colors num_features=3 (R, G, B), and texture coordinates num_features=2 (X, Y), or a list of num_features, of shapes :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim[i]})` knum (int): Maximum number of faces that influence one pixel. Default: 300. valid_faces (torch.BoolTensor): Mask of faces being rendered, of shape :math:`(\text{batch_size}, \text{num_faces})`. Default: All faces are valid. eps (float): Epsilon value used to normalize barycentric weights. Default: 1e-8. 
Returns: (torch.Tensor or list of torch.Tensor, torch.LongTensor): - The rendered features, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{num_features})`, if `face_features` is a list of torch.Tensor, then it returns a list of torch.Tensor, of shapes :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{num_features[i]})`. - The rendered face index, -1 is void, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum})`.
Here is the function:
def _naive_deftet_sparse_render(pixel_coords,
                                render_ranges,
                                face_vertices_z,
                                face_vertices_image,
                                face_features,
                                knum,
                                valid_faces=None,
                                eps=1e-8):
    r"""Naive implementation of :func:`deftet_sparse_render`.

    Note:
        The behavior is different than :func:`deftet_sparse_render`
        when knum < max(number of faces per pixel),
        as this returns the first faces by pixel_depth
        while deftet_render returns the first faces
        by the mesh order.

    Note:
        if `face_camera_vertices` and `face_camera_z` are produced by
        camera functions in :mod:`kaolin.render.camera`,
        then the expected range of values for `pixel_coords` is [-1., 1.]
        and the expected of range values for `render_range` is [-inf, 0.].

    Args:
        pixel_coords (torch.Tensor):
            Image coordinates to render,
            of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`.
        render_ranges (torch.Tensor):
            Range of rendering,
            of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`.
        face_vertices_z (torch.Tensor):
            3D points values of the face vertices in camera coordinate,
            values in front of camera are expected to be negative,
            higher values being closer to the camera,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`.
        face_vertices_image (torch.Tensor):
            2D positions of the face vertices on image plane,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`.
        face_features (torch.Tensor or list of torch.Tensor):
            Features (per-vertex per-face) to be drawn,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`,
            or a list of such tensors with (possibly different)
            feature dims :math:`\text{feature_dim[i]}`.
        knum (int):
            Maximum number of faces that influence one pixel.
        valid_faces (torch.BoolTensor):
            Mask of faces being rendered,
            of shape :math:`(\text{batch_size}, \text{num_faces})`.
            Default: All faces are valid.
        eps (float):
            Epsilon value used to normalize barycentric weights.
            Default: 1e-8.

    Returns:
        (torch.Tensor or list of torch.Tensor, torch.LongTensor):

            - The rendered features, of shape
              :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{num_features})`
              (a tuple of such tensors when ``face_features`` is a list).
            - The rendered face index, -1 is void, of shape
              :math:`(\text{batch_size}, \text{num_pixels}, \text{knum})`.
    """
    # Pack multiple feature tensors into one along the last dim,
    # they are split back at the end.
    _face_features = torch.cat(
        face_features, dim=-1
    ) if isinstance(face_features, (list, tuple)) else face_features
    batch_size = pixel_coords.shape[0]
    num_pixels = pixel_coords.shape[1]
    num_faces = face_vertices_z.shape[1]
    feat_dim = _face_features.shape[-1]
    if valid_faces is None:
        valid_faces = torch.ones((batch_size, num_faces),
                                 device=pixel_coords.device,
                                 dtype=torch.bool)
    assert pixel_coords.shape == (batch_size, num_pixels, 2)
    assert render_ranges.shape == (batch_size, num_pixels, 2)
    assert face_vertices_z.shape == (batch_size, num_faces, 3)
    assert face_vertices_image.shape == (batch_size, num_faces, 3, 2)
    assert _face_features.shape == (batch_size, num_faces, 3, feat_dim)
    # 2D bounding boxes used by the base renderer for cheap rejection.
    face_min = torch.min(face_vertices_image, dim=2)[0]
    face_max = torch.max(face_vertices_image, dim=2)[0]
    # For each pixel, collect up to knum intersected face indices
    # (depth-sorted by the base renderer); -1 marks void slots.
    selected_face_idx = torch.full((batch_size, num_pixels, knum), -1,
                                   device=pixel_coords.device, dtype=torch.long)
    for i in range(batch_size):
        for j in range(num_pixels):
            _face_idx = _base_naive_deftet_render(
                pixel_coords[i, j], render_ranges[i, j], face_vertices_image[i],
                face_vertices_z[i], face_min[i], face_max[i], valid_faces[i], eps)
            selected_face_idx[i, j, :_face_idx.shape[0]] = _face_idx[:knum]
    # Shift indices by one and left-pad the per-face tensors with a zero
    # entry, so that void faces (-1) gather zeros instead of garbage.
    _idx = selected_face_idx + 1
    flat_idx = _idx.reshape(batch_size, -1)

    def _gather_per_face(t):
        # Gather a per-face scalar tensor (batch_size, num_faces + 1)
        # into per-pixel-per-slot layout (batch_size, num_pixels, knum).
        return torch.gather(t, 1, flat_idx).reshape(
            batch_size, num_pixels, knum)

    # Barycentric setup: with triangle (A, B, C) and pixel P,
    # P = A + w1 * (B - A) + w2 * (C - A); solve the 2x2 system by Cramer.
    ax = face_vertices_image[:, :, 0, 0]
    ay = face_vertices_image[:, :, 0, 1]
    m = face_vertices_image[:, :, 1, 0] - face_vertices_image[:, :, 0, 0]
    p = face_vertices_image[:, :, 1, 1] - face_vertices_image[:, :, 0, 1]
    n = face_vertices_image[:, :, 2, 0] - face_vertices_image[:, :, 0, 0]
    q = face_vertices_image[:, :, 2, 1] - face_vertices_image[:, :, 0, 1]
    k3 = m * q - n * p
    _ax = _gather_per_face(torch.nn.functional.pad(ax, (1, 0), value=0.))
    _ay = _gather_per_face(torch.nn.functional.pad(ay, (1, 0), value=0.))
    _m = _gather_per_face(torch.nn.functional.pad(m, (1, 0), value=0.))
    _p = _gather_per_face(torch.nn.functional.pad(p, (1, 0), value=0.))
    _n = _gather_per_face(torch.nn.functional.pad(n, (1, 0), value=0.))
    _q = _gather_per_face(torch.nn.functional.pad(q, (1, 0), value=0.))
    # The determinant is padded with 1. to avoid dividing by zero on void slots.
    _k3 = _gather_per_face(torch.nn.functional.pad(k3, (1, 0), value=1.))
    _face_features = torch.gather(
        torch.nn.functional.pad(_face_features, (0, 0, 0, 0, 1, 0), value=0.),
        1, _idx.reshape(batch_size, -1, 1, 1).repeat(1, 1, 3, feat_dim)
    ).reshape(batch_size, num_pixels, knum, 3, feat_dim)
    _s = pixel_coords[:, :, :1] - _ax
    _t = pixel_coords[:, :, 1:] - _ay
    _k1 = _s * _q - _n * _t
    _k2 = _m * _t - _s * _p
    # Sign-matched epsilon keeps the perturbed determinant away from zero.
    norm_eps = eps * torch.sign(_k3)
    w1 = _k1 / (_k3 + norm_eps)
    w2 = _k2 / (_k3 + norm_eps)
    w0 = 1. - w1 - w2
    weights = torch.stack([w0, w1, w2], dim=-1)
    interpolated_features = torch.sum(_face_features * weights.unsqueeze(-1), dim=-2)
    if isinstance(face_features, (list, tuple)):
        # Split the packed features back into the original list layout.
        _interpolated_features = []
        cur_idx = 0
        for face_feature in face_features:
            _interpolated_features.append(
                interpolated_features[..., cur_idx:cur_idx + face_feature.shape[-1]])
            cur_idx += face_feature.shape[-1]
        interpolated_features = tuple(_interpolated_features)
    return interpolated_features, selected_face_idx
eps (float): Epsilon value used to normalize barycentric weights. Default: 1e-8. Returns: (torch.Tensor or list of torch.Tensor, torch.LongTensor): - The rendered features, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{num_features})`, if `face_features` is a list of torch.Tensor, then it returns a list of torch.Tensor, of shapes :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{num_features[i]})`. - The rendered face index, -1 is void, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum})`. |
import torch
from torch.autograd import Function
from kaolin import _C
class DeftetSparseRenderer(Function):
    """torch.autograd.Function for :func:`deftet_sparse_render`.

    Wraps the CUDA forward / backward kernels and handles depth sorting
    and barycentric feature interpolation of the rasterized faces.
    """

    # NOTE: forward/backward of a torch.autograd.Function must be
    # staticmethods, otherwise .apply() fails on modern PyTorch.
    @staticmethod
    def forward(ctx, pixel_coords, render_ranges, face_vertices_z,
                face_vertices_image, face_features, knum, eps):
        # dims
        batch_size = face_vertices_z.shape[0]
        num_faces = face_vertices_z.shape[1]
        feat_dim = face_features.shape[-1]
        pixel_num = pixel_coords.shape[1]
        # The CUDA kernels require contiguous inputs.
        pixel_coords = pixel_coords.contiguous()
        render_ranges = render_ranges.contiguous()
        face_vertices_z = face_vertices_z.contiguous()
        face_vertices_image = face_vertices_image.contiguous()
        face_features = face_features.contiguous()
        # 2D bounding boxes (min corner, max corner) for cheap rejection.
        face_min = torch.min(face_vertices_image, dim=2)[0]
        face_max = torch.max(face_vertices_image, dim=2)[0]
        face_bboxes = torch.cat((face_min, face_max), dim=2)
        face_idx, pixel_depth, w0, w1 = _C.render.mesh.deftet_sparse_render_forward_cuda(
            face_vertices_z,
            face_vertices_image,
            face_bboxes,
            pixel_coords,
            render_ranges,
            knum,
            eps)
        # Sort intersections by descending depth.
        sorted_idx = torch.argsort(pixel_depth, descending=True, dim=-1)
        sorted_face_idx = torch.gather(face_idx, -1, sorted_idx).contiguous()
        sorted_w0 = torch.gather(w0, -1, sorted_idx)
        sorted_w1 = torch.gather(w1, -1, sorted_idx)
        # Barycentric weights sum to 1 on hit faces and to 0 on void (-1) slots.
        sorted_w2 = (sorted_face_idx != -1).float() - (sorted_w0 + sorted_w1)
        # Shift indices by one and zero-pad the features so that void
        # slots (-1) gather zero features.
        _idx = sorted_face_idx + 1
        _idx = _idx.reshape(batch_size, -1, 1, 1).expand(
            batch_size, pixel_num * knum, 3, feat_dim)
        selected_features = torch.gather(
            torch.nn.functional.pad(face_features, (0, 0, 0, 0, 1, 0), value=0.), 1, _idx).reshape(
            batch_size, pixel_num, knum, 3, feat_dim)
        weights = torch.stack([sorted_w0, sorted_w1, sorted_w2], dim=-1).contiguous()
        interpolated_features = torch.sum(weights.unsqueeze(-1) * selected_features,
                                          dim=-2).contiguous()
        ctx.save_for_backward(sorted_face_idx, weights, face_vertices_image, face_features)
        ctx.mark_non_differentiable(sorted_face_idx)
        ctx.eps = eps
        return interpolated_features, sorted_face_idx

    @staticmethod
    def backward(ctx, grad_interpolated_features, grad_face_idx):
        face_idx, weights, face_vertices_image, face_features = ctx.saved_tensors
        eps = ctx.eps
        grad_face_vertices_image, grad_face_features = \
            _C.render.mesh.deftet_sparse_render_backward_cuda(
                grad_interpolated_features.contiguous(), face_idx, weights,
                face_vertices_image, face_features, eps)
        # Gradients flow only to face_vertices_image and face_features;
        # the other forward inputs are non-differentiable here.
        return None, None, None, grad_face_vertices_image, grad_face_features, None, None
The provided code snippet includes necessary dependencies for implementing the `deftet_sparse_render` function. Write a Python function `def deftet_sparse_render(pixel_coords, render_ranges, face_vertices_z, face_vertices_image, face_features, knum=300, eps=1e-8)` to solve the following problem:
r"""Fully differentiable volumetric renderer devised by *Gao et al.* in `Learning Deformable Tetrahedral Meshes for 3D Reconstruction`_ NeurIPS 2020. This is rasterizing a mesh w.r.t to a list of pixel coordinates, but instead of just rendering the closest intersection. it will render all the intersections sorted by depth order, returning the interpolated features and the indexes of faces intersected for each intersection in padded arrays. Note: The function is not differentiable w.r.t pixel_coords. Note: if `face_camera_vertices` and `face_camera_z` are produced by camera functions in :mod:`kaolin.render.camera`, then the expected range of values for `pixel_coords` is [-1., 1.] and the expected of range values for `render_range` is [-inf, 0.]. Args: pixel_coords (torch.Tensor): Image coordinates to render, of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`. render_ranges (torch.Tensor): Depth ranges on which intersection get rendered, of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`. face_vertices_z (torch.Tensor): 3D points values of the face vertices in camera coordinate, values in front of camera are expected to be negative, higher values being closer to the camera. of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`. face_vertices_image (torch.Tensor): 2D positions of the face vertices on image plane, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`, Note that face vertices are projected on image plane (z=-1) to forms face_vertices_image. face_features (torch.Tensor or list of torch.Tensor): Features (per-vertex per-face) to be drawn, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`, feature is the features dimension, for instance with vertex colors num_features=3 (R, G, B), and texture coordinates num_features=2 (X, Y), or a list of num_features, of shapes :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim[i]})`. knum (int): Maximum number of faces that influence one pixel. 
Default: 300. eps (float): Epsilon value used to normalize barycentric weights. Default: 1e-8. Returns: (torch.Tensor or list of torch.Tensor, torch.LongTensor): - The rendered features, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{feature_dim})`, if `face_features` is a list of torch.Tensor, then it returns a list of torch.Tensor, of shapes :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{feature_dim[i]})`. - The rendered face index, -1 is void, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum})`. .. _Learning Deformable Tetrahedral Meshes for 3D Reconstruction: https://arxiv.org/abs/2011.01437
Here is the function:
def deftet_sparse_render(pixel_coords, render_ranges, face_vertices_z,
                         face_vertices_image, face_features, knum=300, eps=1e-8):
    r"""Fully differentiable volumetric renderer devised by *Gao et al.* in
    `Learning Deformable Tetrahedral Meshes for 3D Reconstruction`_ NeurIPS 2020.

    Rasterizes a mesh against a list of pixel coordinates, but instead of
    keeping only the closest intersection it renders *all* intersections
    sorted by depth order, returning the interpolated features and the
    indexes of faces intersected for each intersection in padded arrays.

    Note:
        The function is not differentiable w.r.t pixel_coords.

    Note:
        if `face_camera_vertices` and `face_camera_z` are produced by
        camera functions in :mod:`kaolin.render.camera`,
        then the expected range of values for `pixel_coords` is [-1., 1.]
        and the expected of range values for `render_range` is [-inf, 0.].

    Args:
        pixel_coords (torch.Tensor):
            Image coordinates to render,
            of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`.
        render_ranges (torch.Tensor):
            Depth ranges on which intersection get rendered,
            of shape :math:`(\text{batch_size}, \text{num_pixels}, 2)`.
        face_vertices_z (torch.Tensor):
            3D points values of the face vertices in camera coordinate,
            values in front of camera are expected to be negative,
            higher values being closer to the camera,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`.
        face_vertices_image (torch.Tensor):
            2D positions of the face vertices on image plane,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`.
        face_features (torch.Tensor or list of torch.Tensor):
            Features (per-vertex per-face) to be drawn,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`,
            or a list of such tensors with (possibly different)
            feature dims :math:`\text{feature_dim[i]}`.
        knum (int):
            Maximum number of faces that influence one pixel. Default: 300.
        eps (float):
            Epsilon value used to normalize barycentric weights.
            Default: 1e-8.

    Returns:
        (torch.Tensor or list of torch.Tensor, torch.LongTensor):

            - The rendered features, of shape
              :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{feature_dim})`
              (a tuple of such tensors when ``face_features`` is a list).
            - The rendered face index, -1 is void, of shape
              :math:`(\text{batch_size}, \text{num_pixels}, \text{knum})`.

    .. _Learning Deformable Tetrahedral Meshes for 3D Reconstruction:
        https://arxiv.org/abs/2011.01437
    """
    is_multi = isinstance(face_features, (list, tuple))
    # Pack multiple feature tensors into a single one for the kernel.
    packed_features = torch.cat(face_features, dim=-1) if is_multi else face_features
    image_features, face_idx = DeftetSparseRenderer.apply(
        pixel_coords, render_ranges, face_vertices_z,
        face_vertices_image, packed_features, knum, eps)
    if is_multi:
        # Slice the packed feature dim back into the original widths.
        chunks = []
        offset = 0
        for feat in face_features:
            width = feat.shape[-1]
            chunks.append(image_features[..., offset:offset + width])
            offset += width
        image_features = tuple(chunks)
    return image_features, face_idx
knum (int): Maximum number of faces that influence one pixel. Default: 300. eps (float): Epsilon value used to normalize barycentric weights. Default: 1e-8. Returns: (torch.Tensor or list of torch.Tensor, torch.LongTensor): - The rendered features, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{feature_dim})`, if `face_features` is a list of torch.Tensor, then it returns a list of torch.Tensor, of shapes :math:`(\text{batch_size}, \text{num_pixels}, \text{knum}, \text{feature_dim[i]})`. - The rendered face index, -1 is void, of shape :math:`(\text{batch_size}, \text{num_pixels}, \text{knum})`. .. _Learning Deformable Tetrahedral Meshes for 3D Reconstruction: https://arxiv.org/abs/2011.01437 |
from __future__ import division
import torch
import torch.nn
from numpy import tan
The provided code snippet includes necessary dependencies for implementing the `rotate_translate_points` function. Write a Python function `def rotate_translate_points(points, camera_rot, camera_trans)` to solve the following problem:
r"""Rotate and translate 3D points on based on rotation matrix and transformation matrix. Formula is :math:`\text{P_new} = R * (\text{P_old} - T)` Args: points (torch.FloatTensor): 3D points, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. camera_rot (torch.FloatTensor): rotation matrix, of shape :math:`(\text{batch_size}, 3, 3)`. camera_trans (torch.FloatTensor): translation matrix, of shape :math:`(\text{batch_size}, 3, 1)`. Returns: (torch.FloatTensor): 3D points in new rotation, of same shape than `points`.
Here is the function:
def rotate_translate_points(points, camera_rot, camera_trans):
    r"""Bring 3D points into camera coordinates via a rotation and a translation.

    Implements :math:`\text{P_new} = R * (\text{P_old} - T)`.

    Args:
        points (torch.FloatTensor): 3D points, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`.
        camera_rot (torch.FloatTensor): rotation matrix, of shape :math:`(\text{batch_size}, 3, 3)`.
        camera_trans (torch.FloatTensor): translation matrix, of shape :math:`(\text{batch_size}, 3, 1)`.

    Returns:
        (torch.FloatTensor): transformed 3D points, of the same shape as `points`.
    """
    # Shift by the camera position first, then rotate:
    # (P - T) @ R^T is the batched row-vector form of R @ (P - T).
    shifted_points = points - camera_trans.view(-1, 1, 3)
    rotated_points = torch.matmul(shifted_points,
                                  camera_rot.permute(0, 2, 1))
    return rotated_points
from __future__ import division
import torch
import torch.nn
from numpy import tan
The provided code snippet includes necessary dependencies for implementing the `generate_rotate_translate_matrices` function. Write a Python function `def generate_rotate_translate_matrices(camera_position, look_at, camera_up_direction)` to solve the following problem:
r"""Generate rotation and translation matrix for given camera parameters. Formula is :math:`\text{P_cam} = \text{rot_mtx} * (\text{P_world} - \text{trans_mtx})` Args: camera_position (torch.FloatTensor): camera positions of shape :math:`(\text{batch_size}, 3)`, it means where your cameras are look_at (torch.FloatTensor): where the camera is watching, of shape :math:`(\text{batch_size}, 3)`, camera_up_direction (torch.FloatTensor): camera up directions of shape :math:`(\text{batch_size}, 3)`, it means what are your camera up directions, generally [0, 1, 0] Returns: (torch.FloatTensor, torch.FloatTensor): the camera rotation matrix of shape :math:`(\text{batch_size}, 3, 3)` and the camera transformation matrix of shape :math:`(\text{batch_size}, 3)`
Here is the function:
def generate_rotate_translate_matrices(camera_position, look_at, camera_up_direction):
    r"""Generate rotation and translation matrix for given camera parameters.

    Formula is :math:`\text{P_cam} = \text{rot_mtx} * (\text{P_world} - \text{trans_mtx})`

    Args:
        camera_position (torch.FloatTensor):
            camera positions of shape :math:`(\text{batch_size}, 3)`,
            it means where your cameras are
        look_at (torch.FloatTensor):
            where the camera is watching, of shape :math:`(\text{batch_size}, 3)`,
        camera_up_direction (torch.FloatTensor):
            camera up directions of shape :math:`(\text{batch_size}, 3)`,
            it means what are your camera up directions, generally [0, 1, 0]

    Returns:
        (torch.FloatTensor, torch.FloatTensor):
            the camera rotation matrix of shape :math:`(\text{batch_size}, 3, 3)`
            and the camera transformation matrix of shape :math:`(\text{batch_size}, 3)`
    """
    def _unit(vec_bx3):
        # Normalize rows to unit length; small epsilon guards zero vectors.
        return vec_bx3 / (vec_bx3.norm(dim=1, keepdim=True) + 1e-10)

    # Forward axis: from the camera towards the look-at target.
    camz_bx3 = _unit(look_at - camera_position)
    # torch.cross does not broadcast (pytorch#39656): align batch sizes by hand.
    if camera_up_direction.shape[0] < camz_bx3.shape[0]:
        camera_up_direction = camera_up_direction.repeat(camz_bx3.shape[0], 1)
    elif camera_up_direction.shape[0] > camz_bx3.shape[0]:
        camz_bx3 = camz_bx3.repeat(camera_up_direction.shape[0], 1)
    # Right and true-up axes, completing the orthonormal camera frame.
    camx_bx3 = _unit(torch.cross(camz_bx3, camera_up_direction, dim=1))
    camy_bx3 = _unit(torch.cross(camx_bx3, camz_bx3, dim=1))
    # Rows of the rotation are (right, up, backward).
    mtx_bx3x3 = torch.stack([camx_bx3, camy_bx3, -camz_bx3], dim=1)
    return mtx_bx3x3, camera_position
from __future__ import division
import torch
import torch.nn
from numpy import tan
The provided code snippet includes necessary dependencies for implementing the `generate_transformation_matrix` function. Write a Python function `def generate_transformation_matrix(camera_position, look_at, camera_up_direction)` to solve the following problem:
r"""Generate transformation matrix for given camera parameters. Formula is :math:`\text{P_cam} = \text{P_world} * \text{transformation_mtx}`, with :math:`\text{P_world}` being the points coordinates padded with 1. Args: camera_position (torch.FloatTensor): camera positions of shape :math:`(\text{batch_size}, 3)`, it means where your cameras are look_at (torch.FloatTensor): where the camera is watching, of shape :math:`(\text{batch_size}, 3)`, camera_up_direction (torch.FloatTensor): camera up directions of shape :math:`(\text{batch_size}, 3)`, it means what are your camera up directions, generally [0, 1, 0] Returns: (torch.FloatTensor): The camera transformation matrix of shape :math:`(\text{batch_size}, 4, 3)`.
Here is the function:
def generate_transformation_matrix(camera_position, look_at, camera_up_direction):
    r"""Generate transformation matrix for given camera parameters.
    Formula is :math:`\text{P_cam} = \text{P_world} * \text{transformation_mtx}`,
    with :math:`\text{P_world}` being the points coordinates padded with 1.
    Args:
        camera_position (torch.FloatTensor):
            camera positions of shape :math:`(\text{batch_size}, 3)`,
            it means where your cameras are
        look_at (torch.FloatTensor):
            where the camera is watching, of shape :math:`(\text{batch_size}, 3)`,
        camera_up_direction (torch.FloatTensor):
            camera up directions of shape :math:`(\text{batch_size}, 3)`,
            it means what are your camera up directions, generally [0, 1, 0]
    Returns:
        (torch.FloatTensor):
            The camera transformation matrix of shape :math:`(\text{batch_size}, 4, 3)`.
    """
    # The camera "forward" (z) axis points from the target back towards the camera.
    forward = camera_position - look_at
    forward = forward / forward.norm(dim=1, keepdim=True)
    # torch.cross does not broadcast
    # (https://github.com/pytorch/pytorch/issues/39656),
    # so explicitly tile whichever operand has the smaller batch.
    num_forward = forward.shape[0]
    num_up = camera_up_direction.shape[0]
    if num_up < num_forward:
        camera_up_direction = camera_up_direction.repeat(num_forward, 1)
    elif num_forward < num_up:
        forward = forward.repeat(num_up, 1)
    right = torch.cross(camera_up_direction, forward, dim=1)
    right = right / right.norm(dim=1, keepdim=True)
    true_up = torch.cross(forward, right, dim=1)
    # The camera basis vectors form the columns of the rotation submatrix.
    rotation = torch.stack([right, true_up, forward], dim=2)
    translation = -camera_position.unsqueeze(1) @ rotation
    return torch.cat([rotation, translation], dim=1)
4,635 | from __future__ import division
import torch
import torch.nn
from numpy import tan
The provided code snippet includes necessary dependencies for implementing the `perspective_camera` function. Write a Python function `def perspective_camera(points, camera_proj)` to solve the following problem:
r"""Projects 3D points on 2D images in perspective projection mode. Args: points (torch.FloatTensor): 3D points in camera coordinate, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. camera_proj (torch.FloatTensor): projection matrix of shape :math:`(3, 1)`. Returns: (torch.FloatTensor): 2D points on image plane of shape :math:`(\text{batch_size}, \text{num_points}, 2)`.
Here is the function:
def perspective_camera(points, camera_proj):
    r"""Projects 3D points on 2D images in perspective projection mode.
    Args:
        points (torch.FloatTensor):
            3D points in camera coordinate, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`.
        camera_proj (torch.FloatTensor): projection matrix of shape :math:`(3, 1)`.
    Returns:
        (torch.FloatTensor):
            2D points on image plane of shape :math:`(\text{batch_size}, \text{num_points}, 2)`.
    """
    # perspective, use only one camera intrinsic parameter
    # TODO(cfujitsang): if we have to permute and reshape the camera matrix
    # does that mean that they are wrong in the first place ?
    scaled = points * camera_proj.view(-1, 1, 3)
    # Perspective divide: x and y are normalized by the (scaled) depth.
    return scaled[:, :, :2] / scaled[:, :, 2:3]
4,636 | from __future__ import division
import torch
import torch.nn
from numpy import tan
The provided code snippet includes necessary dependencies for implementing the `generate_perspective_projection` function. Write a Python function `def generate_perspective_projection(fovyangle, ratio=1.0, dtype=torch.float)` to solve the following problem:
r"""Generate perspective projection matrix for a given camera fovy angle. Args: fovyangle (float): field of view angle of y axis, :math:`tan(\frac{fovy}{2}) = \frac{y}{f}`. ratio (float): aspect ratio :math:`(\frac{width}{height})`. Default: 1.0. Returns: (torch.FloatTensor): camera projection matrix, of shape :math:`(3, 1)`.
Here is the function:
def generate_perspective_projection(fovyangle,
                                    ratio=1.0,
                                    dtype=torch.float):
    r"""Generate perspective projection matrix for a given camera fovy angle.
    Args:
        fovyangle (float):
            field of view angle of y axis, :math:`tan(\frac{fovy}{2}) = \frac{y}{f}`.
        ratio (float):
            aspect ratio :math:`(\frac{width}{height})`. Default: 1.0.
    Returns:
        (torch.FloatTensor):
            camera projection matrix, of shape :math:`(3, 1)`.
    """
    half_tan = tan(fovyangle / 2.0)
    # x is additionally scaled by the aspect ratio; z keeps a fixed -1 entry.
    entries = [[1.0 / (ratio * half_tan)],
               [1.0 / half_tan],
               [-1]]
    return torch.tensor(entries, dtype=dtype)
4,637 | import torch
The provided code snippet includes necessary dependencies for implementing the `blender_coords` function. Write a Python function `def blender_coords()` to solve the following problem:
Blender world coordinates are right handed, with the z axis pointing upwards :: Z Y ^ / | / |---------> X
Here is the function:
def blender_coords():
    """Blender world coordinates are right handed, with the z axis pointing upwards
    ::
       Z  Y
       ^ /
       |/
       |---------> X
    """
    # Permutation matrix mapping cartesian (y-up) axes into Blender's z-up frame.
    axes = [[1, 0, 0],
            [0, 0, 1],
            [0, -1, 0]]
    return torch.tensor(axes)
4,638 | import torch
The provided code snippet includes necessary dependencies for implementing the `opengl_coords` function. Write a Python function `def opengl_coords()` to solve the following problem:
Contemporary OpenGL doesn't enforce specific handedness on world coordinates. However, it is a common standard to define OpenGL world coordinates as right handed, with the y axis pointing upwards (cartesian):: Y ^ | |---------> X / Z
Here is the function:
def opengl_coords():
    """Contemporary OpenGL doesn't enforce specific handedness on world coordinates.
    However, it is a common standard to define OpenGL world coordinates as right handed,
    with the y axis pointing upwards (cartesian)::
       Y
       ^
       |
       |---------> X
      /
     Z
    """
    # The OpenGL convention matches the default frame: the identity permutation.
    return torch.eye(3, dtype=torch.long)
4,639 | from __future__ import annotations
import functools
from copy import deepcopy
import torch
import inspect
from typing import Sequence, List, Dict, Union, Tuple, Type, FrozenSet, Callable
from torch.types import _float, _bool
from .extrinsics import CameraExtrinsics, ExtrinsicsParamsDefEnum
from .intrinsics import CameraIntrinsics, IntrinsicsParamsDefEnum
from .intrinsics_ortho import OrthographicIntrinsics
from .intrinsics_pinhole import PinholeIntrinsics
_HANDLED_TORCH_FUNCTIONS = dict()
The provided code snippet includes necessary dependencies for implementing the `implements` function. Write a Python function `def implements(torch_function)` to solve the following problem:
Registers a torch function override for Camera
Here is the function:
def implements(torch_function):
    """Registers a torch function override for Camera.

    Args:
        torch_function (callable): the torch API function to override,
            e.g. ``torch.allclose``.

    Returns:
        (callable): a decorator that registers its argument in
        ``_HANDLED_TORCH_FUNCTIONS`` under ``torch_function`` and returns it.
    """
    def decorator(func):
        # Copy the overridden torch function's metadata (name, docstring) onto
        # the override, per the registration pattern in pytorch's
        # "Extending torch" documentation. The previous code applied
        # ``functools.wraps(torch_function)`` to ``decorator`` itself, which
        # left ``func``'s metadata untouched.
        functools.update_wrapper(func, torch_function)
        _HANDLED_TORCH_FUNCTIONS[torch_function] = func
        return func
    return decorator
4,640 | from __future__ import annotations
import functools
from copy import deepcopy
import torch
import inspect
from typing import Sequence, List, Dict, Union, Tuple, Type, FrozenSet, Callable
from torch.types import _float, _bool
from .extrinsics import CameraExtrinsics, ExtrinsicsParamsDefEnum
from .intrinsics import CameraIntrinsics, IntrinsicsParamsDefEnum
from .intrinsics_ortho import OrthographicIntrinsics
from .intrinsics_pinhole import PinholeIntrinsics
CameraModuleType = Union[Type[CameraExtrinsics], Type[CameraIntrinsics]]
The provided code snippet includes necessary dependencies for implementing the `_gather_constructors` function. Write a Python function `def _gather_constructors(*cam_modules: CameraModuleType) -> Dict[FrozenSet, Tuple[Callable, List]]` to solve the following problem:
r"""Given a variable list of camera modules, returns a mapping of their constructors, used to disambiguate which ctor should be called according to kwargs. The mapping can be used to disambiguate which ctor should be called according to kwargs. Args: *cam_modules (CameraModuleType): A variable list of CameraExtrinsics or CameraIntrinsic classes or their subtypes. This function will gather constructors from these classes. Return: (dict): a mapping of their constructors in the following format: `unique_arg_identifiers -> (func, args) where: - `unique_arg_identifiers` is a hashable, frozenset of the mandatory args that uniquely identify each constructor. - `func` is a reference to the ctor class function - `args` is the full list of kwargs required by the ctor Example: >>> _gather_constructors(PinholeIntrinsics) { frozenset(('width', 'height', 'focal_x')): (PinholeIntrinsics.from_focal, [width, height, focal_x, focal_y, x0, y0, ..]) frozenset(('width', 'height', 'fov')): PinholeIntrinsics.from_fov, [width, height, fov, fov_direction, x0, y0, ..]) }
Here is the function:
def _gather_constructors(*cam_modules: CameraModuleType) -> Dict[FrozenSet, Tuple[Callable, List]]:
r"""Given a variable list of camera modules, returns a mapping of their constructors,
used to disambiguate which ctor should be called according to kwargs.
The mapping can be used to disambiguate which ctor should be called according to kwargs.
Args:
*cam_modules (CameraModuleType):
A variable list of CameraExtrinsics or CameraIntrinsic classes or their subtypes. This function
will gather constructors from these classes.
Return:
(dict):
a mapping of their constructors in the following format:
`unique_arg_identifiers -> (func, args)
where:
- `unique_arg_identifiers` is a hashable, frozenset of the mandatory args that uniquely identify each
constructor.
- `func` is a reference to the ctor class function
- `args` is the full list of kwargs required by the ctor
Example:
>>> _gather_constructors(PinholeIntrinsics)
{
frozenset(('width', 'height', 'focal_x')):
(PinholeIntrinsics.from_focal, [width, height, focal_x, focal_y, x0, y0, ..])
frozenset(('width', 'height', 'fov')):
PinholeIntrinsics.from_fov, [width, height, fov, fov_direction, x0, y0, ..])
}
"""
ctors = []
for cam_module in cam_modules:
# Constructors are @classmethod with a 'from_' prefix
is_ctor = lambda x: inspect.ismethod(x) and x.__name__.startswith('from_')
# Get all methods that satisfy the constructor predicate.
ctors.extend(inspect.getmembers(cam_module, predicate=is_ctor))
# The return value is a tuple per entry, take the 2nd element with the method reference
ctors = [c[1] for c in ctors]
def _ctor_funcs_to_args(func):
argspec = inspect.getfullargspec(func)
args = argspec.args[1:]
if 'cls' in args:
args.remove('cls')
mandatory_args = args[:-len(argspec.defaults)]
key = frozenset(mandatory_args)
return key, (func, args)
return dict(map(_ctor_funcs_to_args, ctors)) | r"""Given a variable list of camera modules, returns a mapping of their constructors, used to disambiguate which ctor should be called according to kwargs. The mapping can be used to disambiguate which ctor should be called according to kwargs. Args: *cam_modules (CameraModuleType): A variable list of CameraExtrinsics or CameraIntrinsic classes or their subtypes. This function will gather constructors from these classes. Return: (dict): a mapping of their constructors in the following format: `unique_arg_identifiers -> (func, args) where: - `unique_arg_identifiers` is a hashable, frozenset of the mandatory args that uniquely identify each constructor. - `func` is a reference to the ctor class function - `args` is the full list of kwargs required by the ctor Example: >>> _gather_constructors(PinholeIntrinsics) { frozenset(('width', 'height', 'focal_x')): (PinholeIntrinsics.from_focal, [width, height, focal_x, focal_y, x0, y0, ..]) frozenset(('width', 'height', 'fov')): PinholeIntrinsics.from_fov, [width, height, fov, fov_direction, x0, y0, ..]) } |
4,641 | from __future__ import annotations
import functools
from copy import deepcopy
import torch
import inspect
from typing import Sequence, List, Dict, Union, Tuple, Type, FrozenSet, Callable
from torch.types import _float, _bool
from .extrinsics import CameraExtrinsics, ExtrinsicsParamsDefEnum
from .intrinsics import CameraIntrinsics, IntrinsicsParamsDefEnum
from .intrinsics_ortho import OrthographicIntrinsics
from .intrinsics_pinhole import PinholeIntrinsics
class Camera:
r"""Camera is a one-stop class for all camera related differentiable / non-differentiable transformations.
Camera objects are represented by *batched* instances of 2 submodules:
- :class:`CameraExtrinsics`: The extrinsics properties of the camera (position, orientation).
These are usually embedded in the view matrix, used to transform vertices from world space to camera space.
- :class:`CameraIntrinsics`: The intrinsics properties of the lens
(such as field of view / focal length in the case of pinhole cameras).
Intrinsics parameters vary between different lens type,
and therefore multiple CameraIntrinsics subclasses exist,
to support different types of cameras: pinhole / perspective, orthographic, fisheye, and so forth.
For pinehole and orthographic lens, the intrinsics are embedded in a projection matrix.
The intrinsics module can be used to transform vertices from camera space to Normalized Device Coordinates.
.. note::
To avoid tedious invocation of camera functions through
``camera.extrinsics.someop()`` and ``camera.intrinsics.someop()``, kaolin overrides the ``__get_attributes__``
function to forward any function calls of ``camera.someop()`` to
the appropriate extrinsics / intrinsics submodule.
The entire pipeline of transformations can be summarized as (ignoring homogeneous coordinates)::
World Space Camera View Space
V ---CameraExtrinsics.transform()---> V' ---CameraIntrinsics.transform()---
Shape~(B, 3) (view matrix) Shape~(B, 3) |
|
(linear lens: projection matrix) |
+ homogeneus -> 3D |
V
Normalized Device Coordinates (NDC)
Shape~(B, 3)
When using view / projection matrices, conversion to homogeneous coordinates is required.
Alternatively, the `transform()` function takes care of such projections under the hood when needed.
How to apply transformations with kaolin's Camera:
1. Linear camera types, such as the commonly used pinhole camera,
support the :func:`view_projection_matrix()` method.
The returned matrix can be used to transform vertices through pytorch's matrix multiplication, or even be
passed to shaders as a uniform.
2. All Cameras are guaranteed to support a general :func:`transform()` function
which maps coordinates from world space to Normalized Device Coordinates space.
For some lens types which perform non linear transformations,
the :func:`view_projection_matrix()` is non-defined.
Therefore the camera transformation must be applied through
a dedicated function. For linear cameras,
:func:`transform()` may use matrices under the hood.
3. Camera parameters may also be queried directly.
This is useful when implementing camera params aware code such as ray tracers.
How to control kaolin's Camera:
- :class:`CameraExtrinsics`: is packed with useful methods for controlling the camera position and orientation:
:func:`translate() <CameraExtrinsics.translate()>`,
:func:`rotate() <CameraExtrinsics.rotate()>`,
:func:`move_forward() <CameraExtrinsics.move_forward()>`,
:func:`move_up() <CameraExtrinsics.move_up()>`,
:func:`move_right() <CameraExtrinsics.move_right()>`,
:func:`cam_pos() <CameraExtrinsics.cam_pos()>`,
:func:`cam_up() <CameraExtrinsics.cam_up()>`,
:func:`cam_forward() <CameraExtrinsics.cam_forward()>`,
:func:`cam_up() <CameraExtrinsics.cam_up()>`.
- :class:`CameraIntrinsics`: exposes a lens :func:`zoom() <CameraIntrinsics.zoom()>`
operation. The exact functionality depends on the camera type.
How to optimize the Camera parameters:
- Both :class:`CameraExtrinsics`: and :class:`CameraIntrinsics` maintain
:class:`torch.Tensor` buffers of parameters which support pytorch differentiable operations.
- Setting ``camera.requires_grad_(True)`` will turn on the optimization mode.
- The :func:`gradient_mask` function can be used to mask out gradients of specific Camera parameters.
.. note::
:class:`CameraExtrinsics`: supports multiple representions of camera parameters
(see: :func:`switch_backend <CameraExtrinsics.switch_backend()>`).
Specific representations are better fit for optimization
(e.g.: they maintain an orthogonal view matrix).
Kaolin will automatically switch to using those representations when gradient flow is enabled
For non-differentiable uses, the default representation may provide better
speed and numerical accuracy.
Other useful camera properties:
- Cameras follow pytorch in part, and support arbitrary ``dtype`` and ``device`` types through the
:func:`to()`, :func:`cpu()`, :func:`cuda()`, :func:`half()`, :func:`float()`, :func:`double()`
methods and :func:`dtype`, :func:`device` properties.
- :class:`CameraExtrinsics`: and :class:`CameraIntrinsics`: individually support the :func:`requires_grad`
property.
- Cameras implement :func:`torch.allclose` for comparing camera parameters under controlled numerical accuracy.
The operator ``==`` is reserved for comparison by ref.
- Cameras support batching, either through construction, or through the :func:`cat()` method.
.. note::
Since kaolin's cameras are batched, the view/projection matrices are of shapes :math:`(\text{num_cameras}, 4, 4)`,
and some operations, such as :func:`transform()` may return values as shapes of :math:`(\text{num_cameras}, \text{num_vectors}, 3)`.
Concluding remarks on coordinate systems and other confusing conventions:
- kaolin's Cameras assume column major matrices, for example, the inverse view matrix (cam2world) is defined as:
.. math::
\begin{bmatrix}
r1 & u1 & f1 & px \\
r2 & u2 & f2 & py \\
r3 & u3 & f3 & pz \\
0 & 0 & 0 & 1
\end{bmatrix}
This sometimes causes confusion as the view matrix (world2cam) uses a transposed 3x3 submatrix component,
which despite this transposition is still column major (observed through the last `t` column):
.. math::
\begin{bmatrix}
r1 & r2 & r3 & tx \\
u1 & u2 & u3 & ty \\
f1 & f2 & f3 & tz \\
0 & 0 & 0 & 1
\end{bmatrix}
- kaolin's cameras do not assume any specific coordinate system for the camera axes. By default, the
right handed cartesian coordinate system is used. Other coordinate systems are supported through
:func:`change_coordinate_system() <CameraExtrinsics.change_coordinate_system()>`
and the ``coordinates.py`` module::
Y
^
|
|---------> X
/
Z
- kaolin's NDC space is assumed to be left handed (depth goes inwards to the screen).
The default range of values is [-1, 1].
"""
_extrinsics_constructors = _gather_constructors(*_EXTRINSICS_MODULES)
"""Minimal arguments required to disambiguate & invoke the different extrinsics constructors.
(unique_arg_identifiers) -> (func, args)
"""
_intrinsics_constructors = _gather_constructors(*_INTRINSICS_MODULES)
"""Minimal arguments required to disambiguate & invoke the different extrinsics constructors.
(unique_arg_identifiers) -> (func, args)
"""
def __init__(self, extrinsics: CameraExtrinsics, intrinsics: CameraIntrinsics):
r"""Constructs a new camera module from the pre-constructed extrinsics and intrinsics components.
.. seealso::
:func:`Camera.from_args`
Args:
extrinsics (CameraExtrinsics):
A component containing the extrinsic information of the Camera, used to construct a view matrix
intrinsics (CameraIntrinsics):
A component containing the intrinsic information of the Camera, used to transform from camera
space to NDC space.
"""
assert len(extrinsics) == len(intrinsics)
assert extrinsics.device == intrinsics.device
self.extrinsics: CameraExtrinsics = extrinsics
self.intrinsics: CameraIntrinsics = intrinsics
def from_args(cls, **kwargs):
r"""A convenience constructor for the camera class, which takes all extrinsics & intrinsics arguments
at once, and disambiguates them to construct a complete camera object.
The correct way of using this constructor is by specifying the camera args as \**kwargs, for example::
# Construct a pinhole camera with perspective projection
Camera.from_args(
eye=torch.tensor([10.0, 0.0, 0.0]),
at=torch.tensor([0.0, 0.0, 0.0]),
up=torch.tensor([0.0, 1.0, 0.0]),
fov=30 * np.pi / 180, # alternatively focal_x, optionally specify: focal_y, x0, y0
width=800, height=800,
near=1e-2, far=1e2,
dtype=torch.float64,
device='cuda'
)
# Construct an orthographic camera
Camera.from_args(
eye=np.array([10.0, 0.0, 4.0]),
at=np.array([0.0, 0.0, 0.0]),
up=np.array([0.0, 1.0, 0.0]),
width=800, height=800,
near=-800, far=800,
fov_distance=1.0,
dtype=torch.float32,
device='cuda'
)
# Construct a pinhole camera
Camera.from_args(
view_matrix=torch.tensor([[1.0, 0.0, 0.0, 0.5],
[0.0, 1.0, 0.0, 0.5],
[0.0, 0.0, 1.0, 0.5],
[0.0, 0.0, 0.0, 1.0]]),
focal_x=1000,
width=1600, height=1600,
)
Args:
**kwargs (dict of str, *):
keywords specifying the parameters of the camera.
Valid options are a combination of extrinsics, intrinsics and general properties:
* Extrinsic params: ``eye``, ``at``, ``up`` / ``view_matrix`` / ``cam_pos``, ``cam_dir``
* Perspective intrinsic params: ``fov`` / ``focal_x``,
optionally: ``x0``, ``y0``, ``focal_y``, ``fov_direction``
* Orthographic intrinsic params: ``fov_distance``
optionally: ``x0``, ``y0``
* General intrinsic dimensions: ``width``, ``height``, optionally: ``near``, ``far``
* Tensor params properties - optionally: ``device``, ``dtype``
"""
call_args = frozenset(kwargs)
extrinsic_key = [k for k in Camera._extrinsics_constructors.keys() if k.issubset(call_args)]
intrinsic_key = [k for k in Camera._intrinsics_constructors.keys() if k.issubset(call_args)]
if len(extrinsic_key) != 1:
raise ValueError('Camera construction failed due to ambiguous parameters: '
f'{list(kwargs.keys())}')
extrinsic_key = extrinsic_key[0]
extrinsic_ctor, extrinsic_args = Camera._extrinsics_constructors[extrinsic_key]
if len(intrinsic_key) == 0:
# Protect against empty match
intrinsic_key = None
elif len(intrinsic_key) == 1:
# call args should ideally only match a single extrinsics & intrinsics constructor key
intrinsic_key = intrinsic_key[0]
else:
# If more than one intrinsics constructor matches the args, check the other direction:
# are all given callargs contained in the combined extrinsics & intrinsics ctors.
def _is_callargs_subset_of_ctor(key):
_, intrinsic_args = Camera._intrinsics_constructors[key]
candidate_call_args = set(extrinsic_args).union(set(intrinsic_args))
return call_args.issubset(candidate_call_args)
intrinsic_key = list(filter(_is_callargs_subset_of_ctor, intrinsic_key))
# Finally, check if all remaining matches are a subset of a single constructor.
# If so, choose this constructor as it is the one most explicitly referred.
if len(intrinsic_key) > 0:
longest_match = max(intrinsic_key, key=lambda s: len(s))
is_all_matches_subset_of_longest = all([s.issubset(longest_match) for s in intrinsic_key])
intrinsic_key = longest_match if is_all_matches_subset_of_longest else None
if intrinsic_key is None:
raise ValueError(f'Camera construction failed due to ambiguous parameters: {list(kwargs.keys())}')
intrinsic_ctor, intrinsic_args = Camera._intrinsics_constructors[intrinsic_key]
extrinsic_args = {k: v for k, v in kwargs.items() if k in call_args.intersection(extrinsic_args)}
tensors_devices = set([arg.device for arg in extrinsic_args.values() if isinstance(arg, torch.Tensor)])
if 'device' not in extrinsic_args and len(tensors_devices) > 1:
raise ValueError(f'Camera construction with tensors args on different devices is not allowed '
f'without explicitly specifying the Camera "device". Please '
f'review the Camera input args: {list(kwargs.keys())}')
extrinsics = extrinsic_ctor(**extrinsic_args)
_intrinsic_args = {k: v for k, v in kwargs.items() if k in call_args.intersection(intrinsic_args)}
# Make sure dtype and device are consistent with extrinsics
_intrinsic_args['device'] = extrinsics.device
_intrinsic_args['dtype'] = extrinsics.dtype
# Support broadcasting of intrinsics in case extrinsics are batched
# (for intrinsic ctors supporting a number of cameras param)
if 'num_cameras' in intrinsic_args and 'num_cameras' not in _intrinsic_args:
_intrinsic_args['num_cameras'] = len(extrinsics)
intrinsics = intrinsic_ctor(**_intrinsic_args)
return Camera(extrinsics=extrinsics, intrinsics=intrinsics)
def parameters(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns the full parameters set of the camera,
divided to extrinsics and intrinsics parameters
Returns:
(torch.Tensor, torch.Tensor):
the extrinsics and the intrinsics parameters.
"""
return self.extrinsics.parameters(), self.intrinsics.parameters()
def gradient_mask(self, *args: Union[str, ExtrinsicsParamsDefEnum, IntrinsicsParamsDefEnum]) -> torch.Tensor:
"""Creates a gradient mask, which allows to backpropagate only through
params designated as trainable.
This function does not consider the :attr:`requires_grad` field when creating this mask.
.. note::
The 3 extrinsics camera axes are always masked as trainable together.
This design choice ensures that these axes, as well as the view matrix, remain orthogonal.
Args:
*args :
A vararg list of the extrinsic and intrinsic params that should allow gradient flow.
This function also supports conversion of params from their string names.
(i.e: 't' will convert to ``PinholeParamsDefEnum.t``).
Returns:
(torch.BoolTensor, torch.BoolTensor):
the gradient masks, of same shapes than
``self.extrinsics.parameters()`` and ``self.intrinsics.parameters()``.
Example:
>>> extrinsics_mask, intrinsics_mask = camera.gradient_mask('t', 'focal_x', 'focal_y')
>>> # equivalent to the args:
>>> # ExtrinsicsParamsDefEnum.t, IntrinsicsParamsDefEnum.focal_x, IntrinsicsParamsDefEnum.focal_y
>>> extrinsics_params, intrinsic_params = camera.params()
>>> extrinsics_params.register_hook(lambda grad: grad * extrinsics_mask.float())
>>> # extrinsics will now allow gradient flow only for the camera location
>>> intrinsic_params.register_hook(lambda grad: grad * intrinsics_mask.float())
>>> # intrinsics will now allow gradient flow only for the focal length
"""
args = set(args)
str_args = set([a for a in args if isinstance(a, str)])
all_extrinsic_params_str = [e.name for e in ExtrinsicsParamsDefEnum]
all_extrinsic_args = set([a for a in args if isinstance(a, ExtrinsicsParamsDefEnum)])
all_intrinsic_args = set([a for a in args if isinstance(a, IntrinsicsParamsDefEnum)])
extrinsic_args = str_args.intersection(all_extrinsic_params_str).union(all_extrinsic_args)
intrinsic_args = str_args.difference(all_extrinsic_params_str).union(all_intrinsic_args)
return self.extrinsics.gradient_mask(*extrinsic_args), self.intrinsics.gradient_mask(*intrinsic_args)
def width(self) -> int:
"""Camera image plane width (pixel resolution)"""
return self.intrinsics.width
def width(self, value: int) -> None:
"""Camera image plane width (pixel resolution)"""
self.intrinsics.width = value
def height(self) -> int:
"""Camera image plane height (pixel resolution)"""
return self.intrinsics.height
def height(self, value: int) -> None:
"""Camera image plane height (pixel resolution)"""
self.intrinsics.height = value
def lens_type(self) -> str:
r"""A textual description of the camera lens type. (i.e 'pinhole', 'ortho')
"""
return self.intrinsics.lens_type
def device(self) -> torch.device:
"""torch device of parameters tensors"""
assert self.extrinsics.device == self.intrinsics.device, \
'Camera extrinsics and intrinsics use different devices'
return self.extrinsics.device
def dtype(self) -> torch.dtype:
"""torch dtype of parameters tensors"""
assert self.extrinsics.dtype == self.intrinsics.dtype, \
'Camera extrinsics and intrinsics use different dtypes'
return self.extrinsics.dtype
def requires_grad_(self, val: bool):
"""Toggle gradient flow for both extrinsics and intrinsics params.
.. note::
To read the requires_grad attribute access the extrinsics / intrinsics components
explicitly, as their requires_grad status may differ.
"""
self.extrinsics.requires_grad = val
self.intrinsics.requires_grad = val
def to(self, *args, **kwargs) -> Camera:
return Camera(extrinsics=self.extrinsics.to( *args, **kwargs), intrinsics=self.intrinsics.to( *args, **kwargs))
def cpu(self) -> Camera:
return Camera(extrinsics=self.extrinsics.cpu(), intrinsics=self.intrinsics.cpu())
def cuda(self) -> Camera:
return Camera(extrinsics=self.extrinsics.cuda(), intrinsics=self.intrinsics.cuda())
def half(self) -> Camera:
return Camera(extrinsics=self.extrinsics.half(), intrinsics=self.intrinsics.half())
def float(self) -> Camera:
return Camera(extrinsics=self.extrinsics.float(), intrinsics=self.intrinsics.float())
def double(self) -> Camera:
return Camera(extrinsics=self.extrinsics.double(), intrinsics=self.intrinsics.double())
def transform(self, vectors: torch.Tensor):
r"""Applies extrinsic and instrinsic projections consecutively,
thereby projecting the vectors from world to NDC space.
Args:
vectors (torch.Tensor):
the vectors to transform,
of shape :math:`(\text{batch_size}, 3)` or
:math:`(\text{num_cameras}, \text{batch_size}, 3)`.
Returns:
(torch.Tensor):
The vectors projected to NDC space, of the same shape as ``vectors``,
transform can be broadcasted.
"""
post_view = self.extrinsics.transform(vectors)
post_proj = self.intrinsics.transform(post_view)
# Broadcast for single camera in batch: Reshape output to (B, 3) or (C, B, 3) according to input vectors
if len(self) == 1:
post_proj = post_proj.reshape(vectors.shape)
return post_proj
def view_projection_matrix(self):
"""Return the composed view projection matrix.
.. note::
Works only for cameras with linear projection transformations.
Returns:
(torch.Tensor): The view projection matrix, of shape :math:`(\text{num_cameras}, 4, 4)`
"""
view = self.extrinsics.view_matrix()
projection = self.intrinsics.projection_matrix()
return torch.bmm(projection, view)
def cat(cls, cameras: Sequence[Camera]):
"""Concatenate multiple Camera's.
Assumes all cameras use the same width, height, near and far planes.
Args:
cameras (Sequence of Camera): the cameras to concatenate.
Returns:
(Camera): The concatenated cameras as a single Camera.
"""
return Camera(extrinsics=CameraExtrinsics.cat([c.extrinsics for c in cameras]),
intrinsics=CameraIntrinsics.cat([c.intrinsics for c in cameras]))
def __getattr__(self, item):
"""Allows for an easier API - camera attributes are routed to intrinsic / extrinsic components
"""
if item.startswith('__') and item.endswith('__'):
raise AttributeError
extrinsic_attr = hasattr(self.extrinsics, item)
intrinsic_attr = hasattr(self.intrinsics, item)
assert not intrinsic_attr or not extrinsic_attr, \
"Camera cannot implicitly route attribute to extrinsic or intrinsic components " + \
f"as both have similar named attribute {item}"
if extrinsic_attr:
return getattr(self.extrinsics, item)
elif intrinsic_attr:
return getattr(self.intrinsics, item)
else:
raise AttributeError
def __setattr__(self, item, value):
    """Allows for an easier API - camera attributes are routed to intrinsic / extrinsic components.

    Assignment rules: the component fields themselves and dunder names are set
    directly on Camera; any other name is forwarded to whichever single component
    already defines it, and falls back to a plain Camera attribute otherwise.
    """
    if item in ("extrinsics", "intrinsics") or (item.startswith('__') and item.endswith('__')):
        super().__setattr__(item, value)
    else:
        # Look up both components to decide where the attribute belongs.
        extrinsics = getattr(self, "extrinsics")
        intrinsics = getattr(self, "intrinsics")
        extrinsic_attr = hasattr(extrinsics, item)
        intrinsic_attr = hasattr(intrinsics, item)
        if extrinsic_attr and intrinsic_attr:
            # Ambiguous target: force the caller to set the field explicitly on a component.
            raise AttributeError(
                f'Attribute "{item}" is defined in both CameraExtrinsics and '
                'CameraIntrinsics classes. Therefore implicitly setting a new value '
                'through the Camera is ambiguous.')
        elif extrinsic_attr:
            setattr(extrinsics, item, value)
        elif intrinsic_attr:
            setattr(intrinsics, item, value)
        else:
            super().__setattr__(item, value)  # Set attribute for this class
def __copy__(self):
    """Creates a shallow copy: the new instance shares attribute values with the original."""
    # Bypass __init__ and replicate the instance dict as-is.
    duplicate = self.__class__.__new__(self.__class__)
    duplicate.__dict__.update(self.__dict__)
    return duplicate
def __deepcopy__(self, memo):
    """Creates a deep copy: every attribute is recursively duplicated."""
    duplicate = self.__class__.__new__(self.__class__)
    # Register in the memo before recursing, so cyclic references resolve to the copy.
    memo[id(self)] = duplicate
    for name, value in self.__dict__.items():
        setattr(duplicate, name, deepcopy(value, memo))
    return duplicate
def __eq__(self, other):
    """Cameras are equal iff both their extrinsics and intrinsics are equal."""
    return isinstance(other, Camera) and \
        self.extrinsics == other.extrinsics and \
        self.intrinsics == other.intrinsics
def __getitem__(self, item):
    """Indexes the camera batch, returning a Camera over the selected entries."""
    selected_extrinsics = self.extrinsics[item]
    selected_intrinsics = self.intrinsics[item]
    return Camera(extrinsics=selected_extrinsics, intrinsics=selected_intrinsics)
def __len__(self):
    """Returns the number of cameras batched in this object."""
    # Extrinsics and intrinsics are assumed to hold identical batch sizes.
    return len(self.extrinsics)
def __torch_function__(cls, func, types, args=(), kwargs=None):
    """Torch-function protocol hook: dispatches supported torch ops invoked on Camera objects.

    Only functions registered in ``_HANDLED_TORCH_FUNCTIONS`` are dispatched; anything
    else returns ``NotImplemented`` so torch can fall back to other handlers.
    """
    if kwargs is None:
        kwargs = {}
    # Dispatch only registered ops, and only when every operand type is a tensor or Camera.
    if func not in _HANDLED_TORCH_FUNCTIONS or not all(
        issubclass(t, (torch.Tensor, Camera))
        for t in types
    ):
        return NotImplemented
    return _HANDLED_TORCH_FUNCTIONS[func](*args, **kwargs)
def named_params(self) -> List[Dict[str, float]]:
    """Get a descriptive list of named parameters per camera.

    Returns:
        (list of dict): per-camera dictionaries merging extrinsic and intrinsic parameters.
    """
    merged = []
    for ext_params, int_params in zip(self.extrinsics.named_params(), self.intrinsics.named_params()):
        entry = dict(ext_params)
        entry.update(int_params)
        merged.append(entry)
    return merged
def __str__(self) -> str:
    """Human readable description: extrinsics followed by intrinsics."""
    return '\n'.join([self.extrinsics.__str__(), self.intrinsics.__str__()])
def __repr__(self) -> str:
    """Debug representation: extrinsics repr followed by intrinsics repr."""
    return '\n'.join([self.extrinsics.__repr__(), self.intrinsics.__repr__()])
The provided code snippet includes necessary dependencies for implementing the `allclose` function. Write a Python function `def allclose(input: Camera, other: Camera, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool` to solve the following problem:
This function checks if the camera extrinsics and intrinsics are close using :func:`torch.allclose`. Args: input (Camera): first camera to compare other (Camera): second camera to compare atol (float, optional): absolute tolerance. Default: 1e-08 rtol (float, optional): relative tolerance. Default: 1e-05 equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` Returns: (bool): Result of the comparison
Here is the function:
def allclose(input: Camera, other: Camera, rtol: _float = 1e-05, atol: _float = 1e-08,
             equal_nan: _bool = False) -> _bool:
    """Checks whether two cameras are numerically close, component by component,
    using :func:`torch.allclose` on the extrinsics and on the intrinsics.

    Args:
        input (Camera): first camera to compare
        other (Camera): second camera to compare
        rtol (float, optional): relative tolerance. Default: 1e-05
        atol (float, optional): absolute tolerance. Default: 1e-08
        equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal.
            Default: ``False``

    Returns:
        (bool): Result of the comparison
    """
    # Short-circuit: skip the intrinsics comparison when the extrinsics already differ.
    extrinsics_close = torch.allclose(input.extrinsics, other.extrinsics,
                                      rtol=rtol, atol=atol, equal_nan=equal_nan)
    if not extrinsics_close:
        return False
    return torch.allclose(input.intrinsics, other.intrinsics,
                          rtol=rtol, atol=atol, equal_nan=equal_nan)
from __future__ import annotations
from typing import Union, Tuple, Iterable, Sequence, List, Dict
import functools
import numpy as np
import torch
from torch.types import _float, _bool
from . import extrinsics_backends
from .extrinsics_backends import ExtrinsicsParamsDefEnum, ExtrinsicsRep, _REGISTERED_BACKENDS
_HANDLED_TORCH_FUNCTIONS = dict()
The provided code snippet includes necessary dependencies for implementing the `implements` function. Write a Python function `def implements(torch_function)` to solve the following problem:
Registers a torch function override for CameraExtrinsics
Here is the function:
def implements(torch_function):
    """Decorator factory registering a torch function override for CameraExtrinsics."""
    @functools.wraps(torch_function)
    def register(func):
        # Map the torch op to its override; the function is returned unchanged.
        _HANDLED_TORCH_FUNCTIONS[torch_function] = func
        return func
    return register
from __future__ import annotations
from typing import Union, Tuple, Iterable, Sequence, List, Dict
import functools
import numpy as np
import torch
from torch.types import _float, _bool
from . import extrinsics_backends
from .extrinsics_backends import ExtrinsicsParamsDefEnum, ExtrinsicsRep, _REGISTERED_BACKENDS
The provided code snippet includes necessary dependencies for implementing the `register_backend` function. Write a Python function `def register_backend(name: str)` to solve the following problem:
Registers a representation backend class with a unique name. CameraExtrinsics can switch between registered representations dynamically (see :func:`switch_backend()`).
Here is the function:
def register_backend(name: str):
    """Registers a representation backend class under a unique name.

    Registered backends become available for CameraExtrinsics to switch between
    dynamically (see :func:`switch_backend()`).
    """
    # Thin delegation: the actual registry lives in the extrinsics_backends module.
    return extrinsics_backends.register_backend(name)
from __future__ import annotations
from typing import Union, Tuple, Iterable, Sequence, List, Dict
import functools
import numpy as np
import torch
from torch.types import _float, _bool
from . import extrinsics_backends
from .extrinsics_backends import ExtrinsicsParamsDefEnum, ExtrinsicsRep, _REGISTERED_BACKENDS
class CameraExtrinsics():
r"""Holds the extrinsics parameters of a camera: position and orientation in space.
This class maintains the view matrix of camera, used to transform points from world coordinates
to camera / eye / view space coordinates.
This view matrix maintained by this class is column-major, and can be described by the 4x4 block matrix:
.. math::
\begin{bmatrix}
R & t \\
0 & 1
\end{bmatrix}
where **R** is a 3x3 rotation matrix and **t** is a 3x1 translation vector for the orientation and position
respectively.
This class is batched and may hold information from multiple cameras.
:class:`CameraExtrinsics` relies on a dynamic representation backend to manage the tradeoff between various choices
such as speed, or support for differentiable rigid transformations.
Parameters are stored as a single tensor of shape :math:`(\text{num_cameras}, K)`,
where K is a representation specific number of parameters.
Transformations and matrices returned by this class support differentiable torch operations,
which in turn may update the extrinsic parameters of the camera::
convert_to_mat
Backend ---- > Extrinsics
Representation R View Matrix M
Shape (num_cameras, K), Shape (num_cameras, 4, 4)
< ----
convert_from_mat
.. note::
Unless specified manually with :func:`switch_backend`,
kaolin will choose the optimal representation backend depending on the status of ``requires_grad``.
.. note::
Users should be aware, but not concerned about the conversion from internal representations to view matrices.
kaolin performs these conversions where and if needed.
Supported backends:
- **"matrix_se3"**\: A flattened view matrix representation, containing the full information of
special euclidean transformations (translations and rotations).
This representation is quickly converted to a view matrix, but differentiable ops may cause
the view matrix to learn an incorrect, non-orthogonal transformation.
- **"matrix_6dof_rotation"**\: A compact representation with 6 degrees of freedom, ensuring the view matrix
remains orthogonal under optimizations. The conversion to matrix requires a single Gram-Schmidt step.
.. seealso::
`On the Continuity of Rotation Representations in Neural Networks, Zhou et al. 2019
<https://arxiv.org/abs/1812.07035>`_
Unless stated explicitly, the definition of the camera coordinate system used by this class is up to the
choice of the user.
Practitioners should be mindful of conventions when pairing the view matrix managed by this class with a projection
matrix.
"""
DEFAULT_BACKEND = 'matrix_se3'
DEFAULT_DIFFERENTIABLE_BACKEND = 'matrix_6dof_rotation'
def __init__(self, backend: ExtrinsicsRep, shared_fields: dict = None):
    """
    Constructs the camera extrinsics with a representation backend.
    .. warning::
        !! ``__init__`` should not be called directly !!
    See other convenience constructors below to build the extrinsics given some parameters:
    * :func:`from_lookat`:
        constructs the extrinsics module from look, at and pos vectors.
    * :func:`from_camera_pose`:
        constructs the extrinsics module from the camera position & orientation.
    * :func:`from_view_matrix`:
        constructs the extrinsics module from a 4x4 view matrix.
    Args:
        backend (ExtrinsicsRep): representation backend managing the extrinsics parameters.
        shared_fields (dict, optional):
            Dictionary of values that should be shared when "views" or shallow copies of the
            ``CameraExtrinsics`` class are created. Changes made to those fields are reflected in all copies.
    """
    self._backend = backend
    # _shared_fields ensures that views created on this instance will mirror any changes back
    # These fields can be accessed as simple properties
    if shared_fields is not None:
        # Another object have shared its fields, access them through the dict
        self._shared_fields = shared_fields
    else:
        self._shared_fields = dict(
            # 3x3 matrix for bookkeeping coordinate system changes performed
            # (identity until change_coordinate_system() is invoked)
            base_change_matrix=torch.eye(3, device=self.device, dtype=self.dtype),
            # True only when a specific backend was explicitly requested by a user,
            # either during construction or by invoking switch_backend().
            # In this mode kaolin will not attempt to optimize for the best representation backend
            # to the current state but assume the user is responsible for setting one according to their needs
            user_requested_backend=False
        )
def _internal_switch_backend(self, backend_name: str):
    """
    Switches the representation backend to a different implementation.
    'backend_name' must be a registered backend.
    .. note::
        This function does not allow gradient flow, as it is error prone.
    """
    assert backend_name in _REGISTERED_BACKENDS,\
        f"CameraExtrinsics attempted to switch internal representation to an " \
        f"unregistered backend: {backend_name}. Valid values are registered backends: {self.available_backends()}"
    # Round-trip through the 4x4 view matrix: every backend can convert to / from it.
    mat = self._backend.convert_to_mat()
    backend_cls = _REGISTERED_BACKENDS[backend_name]
    self._backend = backend_cls.from_mat(mat,
                                         dtype=self.dtype, device=self.device, requires_grad=self.requires_grad)
    self._shared_fields = self._shared_fields.copy() # Detach from shared fields of previous views
def switch_backend(self, backend_name: str):
    """Explicitly switches the representation backend to a different implementation.

    .. note::
        Manually choosing a backend signals kaolin to disable automatic backend selection.
        This is mainly useful when experimenting with a new representation type;
        for most use cases it is best to let kaolin pick the backend automatically,
        so avoid calling this function explicitly.

    .. warning::
        This function does not allow gradient flow, as it is error prone.

    Args:
        backend_name (str):
            the backend to switch to, must be a registered backend.
            Values supported by default: ``matrix_se3``, ``matrix_6dof_rotation`` (see class description).
    """
    self._internal_switch_backend(backend_name)
    # Record that the user pinned this backend, disabling automatic re-selection.
    self._shared_fields['user_requested_backend'] = True
def _make_backend(cls, mat: torch.Tensor,
                  dtype: torch.dtype = default_dtype,
                  device: Union[torch.device, str] = None,
                  requires_grad: bool = False, backend_name: str = None):
    """
    Creates representation backend from given (C, 4, 4) view matrix and type parameters.

    When ``backend_name`` is None, a backend is picked automatically:
    a differentiation-friendly one if ``requires_grad`` is set, the default one otherwise.
    """
    # Batchify
    if mat.ndim == 2:
        mat = mat.unsqueeze(0)
    # If a backend name is explicitly requested, use that one
    if backend_name is not None:
        assert backend_name in _REGISTERED_BACKENDS,\
            f'CameraExtrinsics tried to use backend: {backend_name},' \
            f'which is not registered. Available backends: {cls.available_backends()}'
    else:
        # If no backend was specified, then by default we choose one which is optimal for torch differentiable ops
        if requires_grad:
            backend_name = CameraExtrinsics.DEFAULT_DIFFERENTIABLE_BACKEND
        else:
            backend_name = CameraExtrinsics.DEFAULT_BACKEND
    backend_class = _REGISTERED_BACKENDS[backend_name]
    backend = backend_class.from_mat(mat, dtype, device, requires_grad)
    return backend
def _from_world_in_cam_coords(cls, rotation: torch.Tensor, translation: torch.Tensor,
                              dtype: torch.dtype = default_dtype,
                              device: Union[torch.device, str] = None,
                              requires_grad: bool = False, backend_name: str = None) -> CameraExtrinsics:
    """Constructs the extrinsics from a rigid transformation describing
    how the world is transformed relative to the camera.
    Essentially, this constructor builds the extrinsic matrix directly from
    the world origin and directions of world axes in camera coordinates.
    Args:
        rotation (torch.Tensor):
            of shape [C]x3x3, for rotating the world to align with camera coordinates
        translation (torch.Tensor):
            of shape [C]x3 or [C]x3x1, for translating the world to align with camera coordinates
        device (str):
            The CameraExtrinsics object will manage torch tensors on this device
        requires_grad (bool):
            Sets the requires_grad field for the params tensor of the CameraExtrinsics
        backend_name (str):
            The backend used to manage the internal representation of the extrinsics, and how it is converted
            to a view matrix.
            Different representations are tuned to varied use cases:
            speed, differentiability w.r.t rigid transformations space, and so forth.
            Normally this should be left as ``None`` to let kaolin automatically select the optimal backend.
            Valid values: ``matrix_se3``, ``matrix_6dof_rotation`` (see class description).
    """
    # Unbatched (3, 3) rotations produce a single-camera batch.
    batch_dim = rotation.shape[0] if rotation.ndim > 2 else 1
    # Assemble the homogeneous 4x4 view matrix [R | t; 0 | 1] in one buffer.
    mat = torch.zeros((batch_dim, 4, 4), dtype=rotation.dtype, device=rotation.device)
    mat[:, :3, :3] = rotation
    # squeeze(-1) accepts both (C, 3) and (C, 3, 1) translation layouts.
    mat[:, :3, 3] = translation.squeeze(-1)
    mat[:, 3, 3] = 1
    backend = cls._make_backend(mat, dtype, device, requires_grad, backend_name)
    extrinsics = CameraExtrinsics(backend)
    extrinsics._shared_fields['user_requested_backend'] = backend_name is not None
    return extrinsics
def _to_tensor_input(data: Union[np.ndarray, torch.Tensor], dtype: torch.dtype, device: Union[torch.device, str]):
    """Coerces numpy arrays or torch tensors into a torch tensor of the given dtype and device."""
    if isinstance(data, torch.Tensor):
        # Already a tensor: just cast / move (no-op when dtype and device already match).
        return data.to(dtype=dtype, device=device)
    return torch.tensor(data, device=device, dtype=dtype)
def from_camera_pose(cls,
                     cam_pos: Union[np.ndarray, torch.Tensor],
                     cam_dir: Union[np.ndarray, torch.Tensor],
                     dtype: torch.dtype = default_dtype,
                     device: Union[torch.device, str] = None,
                     requires_grad: bool = False,
                     backend: str = None) -> CameraExtrinsics:
    r"""Constructs the extrinsics from the camera pose and orientation in world coordinates.
    Args:
        cam_pos (numpy.ndarray or torch.Tensor):
            the location of the camera center in world-coordinates,
            of shape :math:`(3,)`, :math:`(3, 1)`, :math:`(\text{num_cameras}, 3)` or
            :math:`(\text{num_cameras}, 3, 1)`
        cam_dir (numpy.ndarray or torch.Tensor):
            the camera's orientation with respect to the world,
            of shape :math:`(3, 3)` or :math:`(\text{num_cameras}, 3, 3)`
        dtype (optional, str):
            the dtype used for the tensors managed by the CameraExtrinsics.
            If dtype is None, :func:`torch.get_default_dtype()` will be used
        device (optional, str):
            the device on which the CameraExtrinsics object will manage its tensors.
            If device is None, the default torch device will be used
        requires_grad (bool):
            Sets the requires_grad field for the params tensor of the CameraExtrinsics
        backend (str):
            The backend used to manage the internal representation of the extrinsics, and how it is converted
            to a view matrix.
            Different representations are tuned to varied use cases:
            speed, differentiability w.r.t rigid transformations space, and so forth.
            Normally this should be left as ``None`` to let kaolin automatically select the optimal backend.
            Valid values: ``matrix_se3``, ``matrix_6dof_rotation`` (see class description).
    Returns:
        (CameraExtrinsics): the camera extrinsics
    """
    cam_pos = cls._to_tensor_input(cam_pos, device=device, dtype=dtype)
    cam_dir = cls._to_tensor_input(cam_dir, device=device, dtype=dtype)
    # The camera pose / orientation in world coordinates are converted to the world pose / axes in
    # camera coordinates:
    #   R_world = R_cam.T
    #   t_world = -R_world @ t_cam
    world_rotation = torch.transpose(cam_dir, -1, -2)
    # Ensure cam_pos is a column vector (..., 3, 1) before composing the translation.
    if cam_pos.shape[-1] != 1:
        cam_pos = cam_pos.unsqueeze(-1)
    world_translation = -world_rotation @ cam_pos
    return cls._from_world_in_cam_coords(rotation=world_rotation, translation=world_translation,
                                         dtype=dtype, device=device, requires_grad=requires_grad,
                                         backend_name=backend)
def from_lookat(cls,
                eye: Union[np.ndarray, torch.Tensor],
                at: Union[np.ndarray, torch.Tensor],
                up: Union[np.ndarray, torch.Tensor],
                dtype: torch.dtype = default_dtype,
                device: Union[torch.device, str] = None,
                requires_grad: bool = False,
                backend: str = None) -> CameraExtrinsics:
    r"""Constructs the extrinsic from camera position, camera up vector,
    and destination the camera is looking at.
    This constructor is compatible with glm's lookat function, which by default assumes a
    cartesian right-handed coordinate system (z axis positive direction points outwards from screen).
    Args:
        eye (numpy.ndarray or torch.Tensor):
            the location of the camera center in world-coordinates,
            of shape :math:`(3,)`, :math:`(3, 1)`, :math:`(\text{num_cameras}, 3)` or
            :math:`(\text{num_cameras}, 3, 1)`
        up (numpy.ndarray or torch.Tensor):
            the vector pointing up from the camera in world-coordinates,
            of shape :math:`(3,)`, :math:`(3, 1)`, :math:`(\text{num_cameras}, 3)`
            or :math:`(\text{num_cameras}, 3, 1)`
        at (numpy.ndarray or torch.Tensor):
            the direction the camera is looking at in world-coordinates,
            of shape :math:`(3,)`, :math:`(3, 1)`, :math:`(\text{num_cameras}, 3)`
            or :math:`(\text{num_cameras}, 3, 1)`
        dtype (optional, str):
            the dtype used for the tensors managed by the CameraExtrinsics.
            If dtype is None, the :func:`torch.get_default_dtype()` will be used
        device (optional, str):
            the device on which the CameraExtrinsics object will manage its tensors.
            If device is None, the default torch device will be used
        requires_grad (bool):
            Sets the requires_grad field for the params tensor of the CameraExtrinsics
        backend (str):
            The backend used to manage the internal representation of the extrinsics, and how it is converted
            to a view matrix.
            Different representations are tuned to varied use cases:
            speed, differentiability w.r.t rigid transformations space, and so forth.
            Normally this should be left as ``None`` to let kaolin automatically select the optimal backend.
            Valid values: ``matrix_se3``, ``matrix_6dof_rotation`` (see class description).
    Returns:
        (CameraExtrinsics): the camera extrinsics
    """
    eye = cls._to_tensor_input(eye, device=device, dtype=dtype)
    at = cls._to_tensor_input(at, device=device, dtype=dtype)
    up = cls._to_tensor_input(up, device=device, dtype=dtype)
    # Transform to tensors of (C, 3)
    eye = eye.squeeze(-1)
    at = at.squeeze(-1)
    up = up.squeeze(-1)
    if eye.ndim == 1:
        eye = eye.unsqueeze(0)
    if at.ndim == 1:
        at = at.unsqueeze(0)
    if up.ndim == 1:
        up = up.unsqueeze(0)
    # Follow OpenGL conventions: https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml
    backward = at - eye
    backward = torch.nn.functional.normalize(input=backward, dim=-1)
    # Gram-Schmidt style orthogonalization: right = backward x up, then re-derive up.
    right = torch.cross(backward, up, dim=-1)
    right = torch.nn.functional.normalize(input=right, dim=-1)
    up = torch.cross(right, backward, dim=-1)
    # For clarity: the extrinsic matrix maintained by this class is column major.
    # So far we constructed components that specify the camera position and orientation in world coordinates.
    # However, the view matrix is actually constructed using the world origin & axes in camera coordinates.
    # Hence we build components for constructing the inverse view matrix.
    # The view matrix can be obtained by inverting the matrix components we constructed:
    # (1) This amounts to transposing the rotation component,
    # (2) Negating the translation component in world coordinates, and multiplying it with the eye location).
    # Form a batched tensor where the two last dimensions are a matrix (form it transposed already):
    #  [r1,  r2,  r3,      Right
    #   u1,  u2,  u3,      Up
    #  -b1, -b2, -b3]      Forward
    world_rotation = torch.stack((right, up, -backward), dim=1)
    # Translation component, according to cam location within the world
    world_translation = -world_rotation @ eye.unsqueeze(-1)
    return cls._from_world_in_cam_coords(rotation=world_rotation, translation=world_translation,
                                         dtype=dtype, device=device, requires_grad=requires_grad,
                                         backend_name=backend)
def from_view_matrix(cls,
                     view_matrix: Union[np.array, torch.Tensor],
                     dtype: torch.dtype = default_dtype,
                     device: Union[torch.device, str] = None,
                     requires_grad: bool = False,
                     backend: str = None) -> CameraExtrinsics:
    r"""Constructs the extrinsics from a given view matrix
    of shape :math:`(\text{num_cameras}, 4, 4)`.
    The matrix should be a column major view matrix, for converting vectors from world to camera coordinates
    (a.k.a: world2cam matrix):
    .. math::
        \begin{bmatrix}
            r1 & r2 & r3 & tx \\
            u1 & u2 & u3 & ty \\
            f1 & f2 & f3 & tz \\
            0 & 0 & 0 & 1
        \end{bmatrix}
    with:
        - **r**: Right - world x axis, in camera coordinates,
          also the camera right axis, in world coordinates
        - **u**: Up - world y axis, in camera coordinates,
          also the camera up axis, in world coordinates
        - **f**: Forward - world z axis, in camera coordinates,
          also the camera forward axis, in world coordinates
        - **t**: Position - the world origin in camera coordinates
    if you're using a different coordinate system, the axes may be permuted.
    .. seealso::
        :func:`change_coordinate_system()`
    Args:
        view_matrix (numpy.ndarray or torch.Tensor):
            view matrix, of shape :math:`(\text{num_cameras}, 4, 4)`
        dtype (optional, str):
            the dtype used for the tensors managed by the CameraExtrinsics.
            If dtype is None, the :func:`torch.get_default_dtype()` will be used
        device (optional, str):
            the device on which the CameraExtrinsics object will manage its tensors.
            If device is None, the default torch device will be used
        requires_grad (bool):
            Sets the requires_grad field for the params tensor of the CameraExtrinsics
        backend (str):
            The backend used to manage the internal representation of the extrinsics, and how it is converted
            to a view matrix.
            Different representations are tuned to varied use cases:
            speed, differentiability w.r.t rigid transformations space, and so forth.
            Normally this should be left as ``None`` to let kaolin automatically select the optimal backend.
            Valid values: ``matrix_se3``, ``matrix_6dof_rotation`` (see class description).
    Returns:
        (CameraExtrinsics): the camera extrinsics
    """
    view_matrix = cls._to_tensor_input(view_matrix, device=device, dtype=dtype)
    # BUGFIX: remember whether the caller explicitly requested a backend BEFORE the
    # `backend` name is rebound to the constructed backend object below. Previously the
    # flag was computed from the rebound variable (`backend is not None`), which made
    # `user_requested_backend` always True since _make_backend never returns None.
    # This matches the flag semantics used by _from_world_in_cam_coords.
    user_requested_backend = backend is not None
    backend = cls._make_backend(view_matrix, dtype, device, requires_grad, backend)
    extrinsics = CameraExtrinsics(backend)
    extrinsics._shared_fields['user_requested_backend'] = user_requested_backend
    return extrinsics
def change_coordinate_system(self, basis_change: Union[np.array, torch.Tensor]):
    r"""Applies a coordinate system change using the given 3x3 permutation & reflections matrix.
    For instance:
    (1) From a Y-up coordinate system (cartesian) to Z-up:
    .. math::
        \text{basis_change} = \begin{bmatrix}
            1 & 0 & 0 \\
            0 & 0 & -1 \\
            0 & 1 & 0
        \end{bmatrix}
    (2) From a right handed coordinate system (Z pointing outwards) to a left handed one (Z pointing inwards):
    .. math::
        \text{basis_change} = \begin{bmatrix}
            1 & 0 & 0 \\
            0 & 1 & 0 \\
            0 & 0 & -1
        \end{bmatrix}
    The basis_change is assumed to have a determinant of +1 or -1.
    .. seealso::
        :func:`blender_coords()` and :func:`opengl_coords()`
    Args:
        basis_change (numpy.ndarray or torch.Tensor):
            a composition of axes permutation and reflections, of shape :math:`(3, 3)`
    """
    # One prevalent form of performing coordinate change is swapping / negating the inverse view matrix rows.
    # That is - we want to alter the camera axes & position in WORLD coordinates.
    # Note it's enough however, to multiply the R component of the view matrix by the basis change matrix transpose
    # (recall we rotate about the world origin, which remains in place).
    #
    # Compare the inverse matrix before after basis change:
    # Pre basis change:
    #   view_matrix =      inverse_view_matrix =      Rt is R transposed
    #   [ R | t ]          [ Rt | -Rt @ t ]           @ denotes matrix column multiplication
    #   [ 0 | 1 ]          [ 0  |    1    ]
    #
    # Post basis change:
    #   view_matrix =      inverse_view_matrix =      P is the basis change matrix
    #   [ R @ Pt | t ]     [ P @ Rt | -(P @ Rt) @ t ] Pt is the transposition of P
    #   [ 0      | 1 ]     [ 0      |    1          ]
    #
    #                    = [ P @ Rt | P @ (-Rt @ t) ]
    #                      [ 0      |    1          ]
    basis_change = self._to_tensor_input(basis_change, device=self.device, dtype=self.dtype)
    # Cache basis change matrix to be able to revert later if desired
    # (successive changes accumulate via matrix composition).
    self._base_change_matrix = self._base_change_matrix @ basis_change
    # Right-multiplying R by P^T re-expresses the camera axes in the new world basis.
    basis_change = basis_change.T
    basis_change = basis_change.repeat(len(self), 1, 1)
    self.R = self.R @ basis_change
def reset_coordinate_system(self):
    """Reverts any coordinate system changes, restoring kaolin's default
    (right-handed cartesian: x pointing right, y pointing up, z pointing outwards)."""
    # Applying the transpose of the accumulated base-change matrix cancels all
    # previous changes (permutation / reflection matrices are orthogonal).
    self.change_coordinate_system(self._base_change_matrix.T)
def R(self) -> torch.Tensor:
    r"""A tensor whose columns represent the directions of world-axes in camera coordinates,
    of shape :math:`(\text{num_cameras}, 3, 3)`.
    This is the **R** submatrix of the extrinstic matrix:
    .. math::
        \begin{bmatrix}
            R & t \\
            0 & 1
        \end{bmatrix}
    defined as:
    .. math::
        R = \begin{bmatrix}
            r1 & r2 & r3 \\
            u1 & u2 & u3 \\
            f1 & f2 & f3
        \end{bmatrix}
    with:
        - **r**: Right - world x axis, in camera coordinates,
          also the camera right axis, in world coordinates
        - **u**: Up - world y axis, in camera coordinates,
          also the camera up axis, in world coordinates
        - **f**: Forward - world z axis, in camera coordinates,
          also the camera forward axis, in world coordinates
    .. seealso::
        :attr:`cam_forward`, :attr:`cam_up`, :attr:`cam_right` for camera axes
        in world coordinates.
    """
    # The rotation block is the top-left 3x3 of the (C, 4, 4) view matrix.
    return self.view_matrix()[:, :3, :3]
def R(self, val: torch.Tensor):
    """Overwrites the rotation submatrix (world axes in camera coordinates) of the view matrix."""
    updated = self.view_matrix()
    updated[:, :3, :3] = val
    # Push the modified matrix back so the backend parameters stay in sync.
    self._backend.update(updated)
def t(self) -> torch.Tensor:
    r"""The position of world origin in camera coordinates,
    a torch.Tensor of shape :math:`(\text{num_cameras}, 3, 1)`
    This is the **t** vector of the extrinsic matrix:
    .. math::
        \begin{bmatrix}
            R & t \\
            0 & 1
        \end{bmatrix}
    .. seealso::
        :attr:`cam_pos` for the camera position in world coordinates.
    """
    # Translation is the last column (rows 0..2) of the view matrix.
    return self.view_matrix()[:, :3, -1:]
def t(self, val: torch.Tensor):
    """Overwrites the translation column (world origin in camera coordinates) of the view matrix."""
    updated = self.view_matrix()
    if val.ndim == 1:
        # Promote (3,) vectors to the (3, 1) column expected by the matrix slice.
        val = val.unsqueeze(-1)
    updated[:, :3, -1:] = val
    # Push the modified matrix back so the backend parameters stay in sync.
    self._backend.update(updated)
def __len__(self) -> int:
    """Number of cameras batched in this extrinsics instance."""
    # The backend's parameter tensor carries the batch dimension.
    return len(self._backend)
def transform(self, vectors: torch.Tensor) -> torch.Tensor:
    r"""Apply rigid transformation of the camera extrinsics such that
    objects in world coordinates are transformed to camera space coordinates.
    The camera coordinates are cast to the precision of the vectors argument.
    Args:
        vectors (torch.Tensor):
            the vectors, of shape :math:`(\text{num_vectors}, 3)`
            or :math:`(\text{num_cameras}, \text{num_vectors}, 3)`
    Returns:
        (torch.Tensor): the transformed vector, of same shape than ``vectors``
    """
    assert self.dtype == vectors.dtype,\
        f"CameraExtrinsics of dtype {self.dtype} cannot transform vectors of dtype {vectors.dtype}"
    assert self.device == vectors.device, \
        f"CameraExtrinsics of device {self.device} cannot transform vectors of device {vectors.device}"
    num_cameras = len(self)               # C - number of cameras
    batch_size = vectors.shape[-2]        # B - number of vectors
    # expand() broadcasts unbatched (B, 3) input across all C cameras without copying.
    v = vectors.expand(num_cameras, batch_size, 3)[..., None]    # Expand as (C, B, 3, 1)
    R = self.R[:, None].expand(num_cameras, batch_size, 3, 3)    # Expand as (C, B, 3, 3)
    t = self.t[:, None].expand(num_cameras, batch_size, 3, 1)    # Expand as (C, B, 3, 1)
    # Column-major rigid transform: v_cam = R @ v_world + t
    transformed_v = R @ v + t
    return transformed_v.squeeze(-1)      # Return shape:  (C, B, 3)
def inv_transform_rays(self, ray_orig: torch.Tensor, ray_dir: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Transforms rays from camera space to world space (hence: "inverse transform").
    Apply rigid transformation of the camera extrinsics.
    The camera coordinates are cast to the precision of the vectors argument.
    Args:
        ray_orig (torch.Tensor):
            the origins of rays, of shape :math:`(\text{num_rays}, 3)` or
            :math:`(\text{num_cameras}, \text{num_rays}, 3)`
        ray_dir (torch.Tensor):
            the directions of rays, of shape :math:`(\text{num_rays}, 3)` or
            :math:`(\text{num_cameras}, \text{num_rays}, 3)`
    Returns:
        (torch.Tensor, torch.Tensor):
            the transformed ray origins and directions, of same shape than inputs
    """
    assert self.dtype == ray_orig.dtype == ray_dir.dtype,\
        f"CameraExtrinsics of dtype {self.dtype} cannot transform " \
        f"ray_orig/dir of dtype {ray_orig.dtype}, {ray_dir.dtype}"
    assert self.device == ray_orig.device == ray_dir.device, \
        f"CameraExtrinsics of device {self.device} cannot transform " \
        f"ray_orig/dir of device {ray_orig.device}, {ray_dir.device}"
    num_cameras = len(self)               # C - number of cameras
    batch_size = ray_dir.shape[-2]        # B - number of vectors
    d = ray_dir.expand(num_cameras, batch_size, 3)[..., None]    # Expand as (C, B, 3, 1)
    o = ray_orig.expand(num_cameras, batch_size, 3)[..., None]   # Expand as (C, B, 3, 1)
    R = self.R[:, None].expand(num_cameras, batch_size, 3, 3)    # Expand as (C, B, 3, 3)
    t = self.t[:, None].expand(num_cameras, batch_size, 3, 1)    # Expand as (C, B, 3, 1)
    R_T = R.transpose(2, 3)   # Transforms orientation from camera to world
    # Directions are rotated only; origins also undo the translation (o - t) first.
    transformed_dir = R_T @ d             # Inverse rotation is transposition: R^(-1) = R^T
    transformed_orig = R_T @ (o - t)
    return transformed_orig.squeeze(-1), transformed_dir.squeeze(-1)   # Return shape:  (C, B, 3)
def view_matrix(self) -> torch.Tensor:
    r"""Returns a column major view matrix for converting vectors from world to camera coordinates
    (a.k.a: world2cam matrix):
    .. math::
        \begin{bmatrix}
            r1 & r2 & r3 & tx \\
            u1 & u2 & u3 & ty \\
            f1 & f2 & f3 & tz \\
            0 & 0 & 0 & 1
        \end{bmatrix}
    with:
        - **r**: Right - world x axis, in camera coordinates,
          also the camera right axis, in world coordinates
        - **u**: Up - world y axis, in camera coordinates,
          also the camera up axis, in world coordinates
        - **f**: Forward - world z axis, in camera coordinates,
          also the camera forward axis, in world coordinates
        - **t**: Position - the world origin in camera coordinates
    if you're using a different coordinate system, the axes may be permuted.
    .. seealso::
        :func:`change_coordinate_system()`
    The matrix returned by this class supports pytorch differential operations
    .. note::
        practitioners are advised to choose a representation backend which
        supports differentiation of rigid transformations
    .. note::
        Changes modifying the returned tensor will also update the extrinsics parameters.
    Returns:
        (torch.Tensor):
            the view matrix, of shape :math:`(\text{num_cameras}, 4, 4)` (homogeneous coordinates)
    """
    # Delegates to the backend, which materializes its parameters as a (C, 4, 4) matrix.
    return self._backend.convert_to_mat()
def inv_view_matrix(self) -> torch.Tensor:
    r"""Returns the inverse of the view matrix, used to convert vectors from camera
    to world coordinates (a.k.a: cam2world matrix). This matrix is column major:

    .. math::

        \begin{bmatrix}
            r1 & u1 & f1 & px \\
            r2 & u2 & f2 & py \\
            r3 & u3 & f3 & pz \\
            0 & 0 & 0 & 1
        \end{bmatrix}

    where the upper-left 3x3 block holds the camera right / up / forward axes
    in world coordinates, and the last column holds the camera position.

    .. seealso::

        :func:`change_coordinate_system()`

    Returns:
        (torch.Tensor):
            the inverse view matrix, of shape :math:`(\text{num_cameras}, 4, 4)`
    """
    num_cameras = len(self)
    rotation_t = self.R.transpose(1, 2)       # For a rigid transform: R^(-1) = R^T
    cam_center = -rotation_t @ self.t         # Camera position: -R^T @ t
    mat = torch.eye(4, device=self.device, dtype=self.dtype)
    mat = mat.unsqueeze(0).repeat(num_cameras, 1, 1)
    mat[:, :3, :3] = rotation_t
    mat[:, :3, 3:4] = cam_center
    return mat
def update(self, mat: torch.Tensor):
    r"""Updates extrinsics parameters to match the given view matrix.

    Args:
        mat (torch.Tensor):
            the new view matrix, of shape :math:`(\text{num_cameras}, 4, 4)`
    """
    # The backend converts the matrix into its internal parameterization.
    self._backend.update(mat)
def translate(self, t: torch.Tensor):
    r"""Translates the camera in world coordinates; the orientation axes are unchanged.

    Args:
        t (torch.Tensor):
            Amount of translation in world space coordinates,
            of shape :math:`(3,)` or :math:`(3, 1)` broadcasting over all the cameras,
            or :math:`(\text{num_cameras}, 3, 1)` for applying unique translation per camera.
    """
    assert self.dtype == t.dtype,\
        f"CameraExtrinsics of dtype {self.dtype} cannot translate with tensor of dtype {t.dtype}"
    assert self.device == t.device, \
        f"CameraExtrinsics of device {self.device} cannot translate with tensor of device {t.device}"
    # Ensure the translation is a column vector so it matmuls against R.
    col = t if t.shape[-1] == 1 else t[..., None]
    # Moving the camera by `col` in world space subtracts R @ col from the
    # stored translation component (batch dim broadcasts when needed).
    self.t -= self.R @ col
def rotate(self,
           yaw: Union[float, torch.Tensor]=None,
           pitch: Union[float, torch.Tensor]=None,
           roll: Union[float, torch.Tensor]=None):
    r"""Executes an inplace rotation of the camera using the given yaw, pitch, and roll amounts.

    Input can be float / tensor: float units will apply the same rotation on all cameras,
    where torch.Tensors allow for applying a per-camera rotation.
    Rotation is applied in camera space.

    Args:
        yaw (torch.Tensor or float):
            Amount of rotation in radians around normal direction of right-up plane
        pitch (torch.Tensor or float):
            Amount of rotation in radians around normal direction of right-forward plane
        roll (torch.Tensor or float):
            Amount of rotation in radians around normal direction of up-forward plane
    """
    # Scalars are promoted to 1-element tensors so the batched construction
    # below broadcasts the same angle over all cameras.
    if yaw is not None and not isinstance(yaw, torch.Tensor):
        yaw = torch.tensor([yaw], device=self.device, dtype=self.dtype)
    if pitch is not None and not isinstance(pitch, torch.Tensor):
        pitch = torch.tensor([pitch], device=self.device, dtype=self.dtype)
    if roll is not None and not isinstance(roll, torch.Tensor):
        roll = torch.tensor([roll], device=self.device, dtype=self.dtype)
    # Yaw-Pitch-Roll (a.k.a Tait Bryan angles) affect the camera angles as follows:
    #       camera up (yaw)
    #          ^    camera forward (roll)
    #          |   ^
    #          |  /
    #          | /
    #          ----------> camera right (pitch)
    rotation_mat = torch.eye(4, device=self.device, dtype=self.dtype)
    if yaw is not None:  # Rotate around "camera up" axis
        # Batch compatible version of
        # torch.tensor([
        #     [torch.cos(yaw), 0, -torch.sin(yaw), 0],
        #     [0, 1, 0, 0],
        #     [torch.sin(yaw), 0, torch.cos(yaw), 0],
        #     [0, 0, 0, 1]
        # ])
        rot_yaw = torch.eye(4, device=self.device, dtype=self.dtype).repeat(len(self), 1, 1)
        rot_yaw[:, 0, 0] = torch.cos(yaw)
        rot_yaw[:, 0, 2] = -torch.sin(yaw)
        rot_yaw[:, 2, 0] = torch.sin(yaw)
        rot_yaw[:, 2, 2] = torch.cos(yaw)
        rotation_mat = rot_yaw @ rotation_mat
    if pitch is not None:  # Rotate around "camera right" axis
        # Batch compatible version of
        # torch.tensor([
        #     [1, 0, 0, 0],
        #     [0, torch.cos(pitch), torch.sin(pitch), 0],
        #     [0, -torch.sin(pitch), torch.cos(pitch), 0],
        #     [0, 0, 0, 1]
        # ])
        rot_pitch = torch.eye(4, device=self.device, dtype=self.dtype).repeat(len(self), 1, 1)
        rot_pitch[:, 1, 1] = torch.cos(pitch)
        rot_pitch[:, 1, 2] = torch.sin(pitch)
        rot_pitch[:, 2, 1] = -torch.sin(pitch)
        rot_pitch[:, 2, 2] = torch.cos(pitch)
        rotation_mat = rot_pitch @ rotation_mat
    if roll is not None:  # Rotate around "camera forward" axis
        # Batch compatible version of
        # torch.tensor([
        #     [torch.cos(roll), -torch.sin(roll), 0, 0],
        #     [torch.sin(roll), torch.cos(roll), 0, 0],
        #     [0, 0, 1, 0],
        #     [0, 0, 0, 1]
        # ], device=self.device, dtype=self.dtype)
        rot_roll = torch.eye(4, device=self.device, dtype=self.dtype).repeat(len(self), 1, 1)
        rot_roll[:, 0, 0] = torch.cos(roll)
        rot_roll[:, 0, 1] = -torch.sin(roll)
        rot_roll[:, 1, 0] = torch.sin(roll)
        rot_roll[:, 1, 1] = torch.cos(roll)
        rotation_mat = rot_roll @ rotation_mat
    # NOTE(review): when any angle was given, rotation_mat is already batched
    # (num_cameras, 4, 4); unsqueeze(0) then yields a (1, num_cameras, 4, 4)
    # product via broadcasting — confirm the backend's update() accepts this shape.
    mat = rotation_mat.unsqueeze(0) @ self.view_matrix()
    self._backend.update(mat)
def move_right(self, amount):
    """Translates the camera along the camera right axis.

    Args:
        amount (torch.Tensor or float):
            Amount of translation, measured in world coordinates
    """
    # Since t = -R @ cam_pos and R @ cam_right = e_x (see cam_right()),
    # a move along the right axis subtracts the x one-hot column from t (in-place).
    self.t -= self._world_x() * amount
def move_up(self, amount):
    """Translates the camera along the camera up axis.

    Args:
        amount (torch.Tensor or float):
            Amount of translation, measured in world coordinates.
    """
    # Mirrors move_right(): a move along the up axis subtracts the y one-hot
    # column from the stored translation (in-place).
    self.t -= self._world_y() * amount
def move_forward(self, amount):
    """Translates the camera along the camera forward axis.

    Args:
        amount (torch.Tensor or float):
            Amount of translation, measured in world coordinates.
    """
    # Mirrors move_right(): a move along the forward axis subtracts the z
    # one-hot column from the stored translation (in-place).
    self.t -= self._world_z() * amount
def _world_x(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor): the world x axis in world coordinates.
    """
    # One-hot x column with the same batch shape / device / dtype as self.t.
    right_col = torch.zeros_like(self.t)
    right_col[:, 0] = 1.0
    return right_col
def _world_y(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor): the world y axis in world coordinates.
    """
    # One-hot y column with the same batch shape / device / dtype as self.t.
    up_col = torch.zeros_like(self.t)
    up_col[:, 1] = 1.0
    return up_col
def _world_z(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor): the world z axis in world coordinates.
    """
    # One-hot z column with the same batch shape / device / dtype as self.t.
    forward_col = torch.zeros_like(self.t)
    forward_col[:, 2] = 1.0
    return forward_col
def cam_pos(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor): the camera position, in world coordinates
    """
    # t stores the world origin in camera coordinates; invert the rigid
    # transform (R^-1 = R^T) to recover the camera center: -R^T @ t.
    rotation_t = self.R.transpose(1, 2)
    return -(rotation_t @ self.t)
def cam_right(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor): the camera right axis, in world coordinates
    """
    # R^T maps the camera-space x axis back into world space.
    return self.R.transpose(2, 1) @ self._world_x()
def cam_up(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor): the camera up axis, in world coordinates
    """
    # R^T maps the camera-space y axis back into world space.
    return self.R.transpose(2, 1) @ self._world_y()
def cam_forward(self) -> torch.Tensor:
    r"""Returns the camera forward axis -
    See: https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/lookat-function/framing-lookat-function.html

    Returns:
        (torch.Tensor): the camera forward axis, in world coordinates."""
    # R^T maps the camera-space z axis back into world space.
    return self.R.transpose(2, 1) @ self._world_z()
def parameters(self) -> torch.Tensor:
    """Returns:
        (torch.Tensor):
            the extrinsics parameters buffer.
            This is essentially the underlying representation of the extrinsics,
            and is backend dependent.
    """
    # Exposed directly (not a copy): mutations write through to the backend.
    return self._backend.params
def device(self) -> torch.device:
    """the torch device of parameters tensor"""
    # NOTE(review): accessed elsewhere as `self.device` (no call), which suggests
    # a stripped @property decorator in this extraction — confirm against upstream.
    return self._backend.device
def dtype(self) -> torch.dtype:
    """the torch dtype of parameters tensor"""
    # NOTE(review): accessed elsewhere as `self.dtype` (no call) — likely a
    # stripped @property decorator; confirm against upstream.
    return self._backend.dtype
def to(self, *args, **kwargs) -> CameraExtrinsics:
    """An instance of this object with the parameters tensor on the given device.

    If the specified device is the same as this object, this object will be returned.
    Otherwise a new object with a copy of the parameters tensor on the requested device will be created.

    .. seealso::

        :func:`torch.Tensor.to`
    """
    converted_backend = self._backend.to(*args, **kwargs)
    # Backends return themselves when no conversion was needed; reuse this instance.
    if self._backend == converted_backend:
        return self
    else:
        extrinsics = CameraExtrinsics(converted_backend)
        # Preserve this camera's coordinate-system basis on the converted copy.
        extrinsics._base_change_matrix = self._base_change_matrix.clone()
        return extrinsics
def cpu(self) -> CameraExtrinsics:
    """Shorthand for :func:`to` with a cpu device target."""
    return self.to('cpu')
def cuda(self) -> CameraExtrinsics:
    """Shorthand for :func:`to` with a cuda device target."""
    return self.to('cuda')
def half(self) -> CameraExtrinsics:
    """Shorthand for :func:`to` with a float16 dtype target."""
    return self.to(torch.float16)
def float(self) -> CameraExtrinsics:
    """Shorthand for :func:`to` with a float32 dtype target."""
    return self.to(torch.float32)
def double(self) -> CameraExtrinsics:
    """Shorthand for :func:`to` with a float64 dtype target."""
    return self.to(torch.float64)
def requires_grad(self) -> bool:
    """True if the current extrinsics object allows gradient flow.

    .. note::
        All extrinsics backends allow gradient flow, but some are not guaranteed to maintain a rigid
        transformation view matrix.
    """
    # NOTE(review): a setter with the same name follows — this pair reads like a
    # stripped @property / @requires_grad.setter; confirm against upstream.
    return self._backend.requires_grad
def requires_grad(self, val: bool):
    """Toggle gradient flow for the extrinsics.

    .. note::
        All extrinsics backends allow gradient flow, but some are not guaranteed to maintain a rigid
        transformation view matrix. By default, kaolin will switch the representation backend to one that
        supports differentiable rigid transformations. This behaviour is disabled if users explicitly choose
        the representation backend through :func:`switch_backend`.
    """
    # NOTE(review): same name as the getter above — likely a stripped
    # @requires_grad.setter decorator; confirm against upstream.
    if self.requires_grad != val and not self._shared_fields['user_requested_backend']:
        # If the user hasn't requested a specific backend, automatically set the
        # one which best agrees with the new differentiability state.
        # For requires_grad = True, if the params tensor is a leaf node set a differentiable representation backend
        if val and self.parameters().is_leaf:
            backend = 'matrix_6dof_rotation'
        else:
            backend = 'matrix_se3'
        self._internal_switch_backend(backend)
    self._backend.requires_grad = val
def backend_name(self) -> str:
    """the unique name used to register the currently used representation backend.

    Values available by default:

    - **"matrix_se3"**: A flattened view matrix representation, containing the full information of
      special euclidean transformations (translations and rotations).
      This representation is quickly converted to a view matrix, but differentiable ops may cause
      the view matrix to learn an incorrect, non-orthogonal transformation.
    - **"matrix_6dof_rotation"**: A compact representation with 6 degrees of freedom,
      ensuring the view matrix remains orthogonal under optimizations.
      The conversion to matrix requires a single Gram-Schmidt step.
    """
    # Reverse lookup: find the registry key whose value is the active backend class.
    value_idx = list(_REGISTERED_BACKENDS.values()).index(type(self._backend))
    backend_name = list(_REGISTERED_BACKENDS.keys())[value_idx]
    return backend_name
def _base_change_matrix(self):
    """the transformation matrix (permutation + reflections) used to change the coordinates system
    of this camera from the default cartesian one to another.

    This matrix is manipulated by: :func:`change_coordinate_system()`,

    .. seealso::

        :func:`reset_coordinate_system()`
    """
    # Stored in _shared_fields so views created by __getitem__ mirror changes.
    return self._shared_fields.get('base_change_matrix')
def _base_change_matrix(self, value):
    """Sets the transformation matrix (permutation + reflections) used to change the coordinates system
    of this camera from the default cartesian one to another.

    .. seealso::

        :func:`reset_coordinate_system()`
    """
    # Writing to _shared_fields propagates the change to all views of this object.
    self._shared_fields['base_change_matrix'] = value
def basis_change_matrix(self):
    """The transformation matrix (permutation + reflections) used to change the coordinates system
    of this camera from the default cartesian one to another.

    This matrix is manipulated by: :func:`change_coordinate_system()`,
    :func:`reset_coordinate_system()`
    """
    # Public read-only accessor over the private _base_change_matrix field.
    return self._base_change_matrix
def gradient_mask(self, *args: Union[str, ExtrinsicsParamsDefEnum]) -> torch.Tensor:
    r"""Creates a gradient mask, which allows to backpropagate only through params designated as trainable.

    This function does not consider the ``requires_grad`` field when creating this mask.

    .. note::
        The 3 camera axes are always masked as trainable together.
        This design choice ensures that these axes, as well as the view matrix, remain orthogonal.

    Args:
        *args: A vararg list of the extrinsics params that should allow gradient flow.
            This function also supports conversion of params from their string names.
            (i.e: 't' will convert to ``ExtrinsicsParamsDefEnum.t``)

    Example:
        >>> # equivalent to: mask = extrinsics.gradient_mask(ExtrinsicsParamsDefEnum.t)
        >>> mask = extrinsics.gradient_mask('t')
        >>> extrinsics.params.register_hook(lambda grad: grad * mask.float())
        >>> # extrinsics will now allow gradient flow only for the camera location

    Raises:
        ValueError: if an arg is not a valid param name or enum value.

    Returns:
        (torch.BoolTensor): the gradient mask, of same shape than ``self.parameters()``
    """
    try:
        # Convert str args to ExtrinsicsParamsDefEnum subclass values
        args = [ExtrinsicsParamsDefEnum[a] if isinstance(a, str) else a for a in args]
    except KeyError as e:
        # Fix: the message previously referenced set_trainable_params(), a
        # different function, which misled callers about the error source.
        raise ValueError(f'Camera\'s gradient_mask() received an unsupported arg: {e}')
    mask = torch.zeros_like(self.parameters()).bool()
    for param in args:
        # The indices of each extrinsic param are backend dependent
        indices = self._backend.param_idx(param)
        # Fix: assign a bool instead of 1.0 (avoids implicit float->bool cast
        # into the boolean mask tensor).
        mask[:, indices] = True
    return mask
def __getitem__(self, item) -> CameraExtrinsics:
    r"""Returns a view on a specific cameras from the batch of cameras managed by this object.

    Returns:
        (CameraExtrinsics):
            A subset of camera's extrinsics from this batched object,
            of shape :math:`(\text{size_slice}, 4, 4)`"""
    # Shares _shared_fields so coordinate-basis changes mirror across views.
    return CameraExtrinsics(self._backend[item], self._shared_fields)
def __torch_function__(cls, func, types, args=(), kwargs=None):
    # Torch-function protocol hook: dispatches supported torch functions
    # (e.g. torch.allclose) to overrides registered in _HANDLED_TORCH_FUNCTIONS.
    # NOTE(review): the `cls` first parameter suggests a stripped @classmethod
    # decorator in this extraction — confirm against upstream.
    if kwargs is None:
        kwargs = {}
    if func not in _HANDLED_TORCH_FUNCTIONS or not all(
        issubclass(t, (torch.Tensor, CameraExtrinsics))
        for t in types
    ):
        return NotImplemented
    return _HANDLED_TORCH_FUNCTIONS[func](*args, **kwargs)
def available_backends(cls) -> Iterable[str]:
    """Returns:
        (iterable of str):
            list of available representation backends,
            to be used with :func:`switch_backend`
    """
    # NOTE(review): `cls` parameter suggests a stripped @classmethod decorator.
    return _REGISTERED_BACKENDS.keys()
def cat(cls, cameras: Sequence[CameraExtrinsics]):
    """Concatenate multiple CameraExtrinsics's.

    Assumes all cameras use the same coordinate system.
    (kaolin will not alert if not, the coordinate system will be selected as the first camera)

    Args:
        cameras (Sequence of CameraExtrinsics): the cameras extrinsics to concatenate.

    Returns:
        (CameraExtrinsics): The concatenated cameras extrinsics as a single CameraExtrinsics,
            or None when `cameras` is empty.
    """
    # NOTE(review): `cls` parameter suggests a stripped @classmethod decorator.
    if len(cameras) == 0:
        return None
    view_mats = [c.view_matrix() for c in cameras]
    batched_cams = torch.cat(view_mats, dim=0)
    # The first camera dictates device / dtype / grad / backend of the result.
    # NOTE(review): `backend_name` is passed unparenthesized — presumably a
    # property upstream; confirm it resolves to a str here.
    extrinsics = CameraExtrinsics.from_view_matrix(batched_cams,
                                                   device=cameras[0].device,
                                                   dtype=cameras[0].dtype,
                                                   requires_grad=cameras[0].requires_grad,
                                                   backend=cameras[0].backend_name)
    extrinsics._base_change_matrix = cameras[0]._base_change_matrix
    return extrinsics
def named_params(self) -> List[Dict[str, float]]:
    """Get a descriptive list of named parameters per camera.

    Returns:
        (list of dict): The named parameters.
    """
    named_params_per_camera = []
    params = self.parameters()
    # Indices of the R / t components inside the params buffer are backend dependent.
    R_idx = self._backend.param_idx(ExtrinsicsParamsDefEnum.R)
    t_idx = self._backend.param_idx(ExtrinsicsParamsDefEnum.t)
    # Collect the parameters of each of the cameras
    for camera_idx in range(len(self)):
        cam_params = dict(
            R=params[camera_idx, R_idx],
            t=params[camera_idx, t_idx]
        )
        named_params_per_camera.append(cam_params)
    return named_params_per_camera
def __repr__(self) -> str:
    # Debug representation: batch size, device/dtype, backend, basis and raw params.
    title = f"CameraExtrinsics of {len(self)} cameras, device: {self.device}, dtype: {self.dtype}, " \
            f"backend: {type(self._backend).__name__}.\n"
    coords = f"Coordinates basis: \n{self.basis_change_matrix}.\n"
    params_txt = f"Extrinsic params: {self.parameters()}\n"
    return ''.join([title] + [coords] + [params_txt])
def __str__(self) -> str:
    # Human-readable form: coordinate basis plus per-camera view / inverse view matrices.
    return f"CameraExtrinsics of {len(self)} cameras, of coordinate system: \n{self.basis_change_matrix}. \n" + \
           '\n'.join([
               f"Camera #{idx} View Matrix: \n{self.view_matrix()},\n" \
               f"Camera #{idx} Inverse View Matrix: \n{self.inv_view_matrix()}\n"
               for idx in range(len(self))
           ])
The provided code snippet includes necessary dependencies for implementing the `allclose` function. Write a Python function `def allclose(input: CameraExtrinsics, other: CameraExtrinsics, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool` to solve the following problem:
:func:`torch.allclose` compatibility implementation for CameraExtrinsics. Args: input (Camera): first camera to compare other (Camera): second camera to compare atol (float, optional): absolute tolerance. Default: 1e-08 rtol (float, optional): relative tolerance. Default: 1e-05 equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` Returns: (bool): Result of the comparison
Here is the function:
def allclose(input: CameraExtrinsics, other: CameraExtrinsics, rtol: _float = 1e-05, atol: _float = 1e-08,
             equal_nan: _bool = False) -> _bool:
    """:func:`torch.allclose` compatibility implementation for CameraExtrinsics.

    Two extrinsics are considered close when they share the same representation
    backend and their parameter buffers are element-wise close within tolerance.

    Args:
        input (Camera): first camera to compare
        other (Camera): second camera to compare
        atol (float, optional): absolute tolerance. Default: 1e-08
        rtol (float, optional): relative tolerance. Default: 1e-05
        equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal.
            Default: ``False``

    Returns:
        (bool): Result of the comparison
    """
    if input.backend_name != other.backend_name:
        return False
    return torch.allclose(input.parameters(), other.parameters(),
                          rtol=rtol, atol=atol, equal_nan=equal_nan)
4,645 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type, Union
from enum import IntEnum
import warnings
import torch
_REGISTERED_BACKENDS = dict()
class ExtrinsicsRep(ABC):
    """
    An abstract class for representing CameraExtrinsics representation backends.

    This class keeps the separation between the parameter representation space and the associated rigid
    transformation (usually represented as a view matrix) separate.
    Different representations are tuned to varied use cases: speed, differentiability w.r.t rigid transformations space,
    and so forth.

    NOTE(review): several methods below take `cls` or read like properties
    (`from_mat`, `convert_from_mat`, `param_idx`, `backend_name`, `device`,
    `dtype`, `requires_grad`) — their decorators (@classmethod / @abstractmethod /
    @property) appear stripped in this extraction; confirm against upstream.
    """
    def __init__(self, params: torch.Tensor,
                 dtype: torch.dtype = None,
                 device: Union[torch.device, str] = None,
                 requires_grad: bool = False):
        # `params` is the flat per-camera parameter buffer of shape (C, K).
        self.params = params
        if device is not None:
            self.params = self.params.to(device=device, dtype=dtype)
        elif dtype is not None:
            self.params = self.params.to(dtype=dtype)
        # If the params tensor already has a gradient computation graph
        if self.params.grad_fn is not None:
            if not requires_grad:  # if the requires_grad arg is true, do nothing, we're set
                # Otherwise, if explicitly requested not to have grads,
                # then detach computation graph to create a separate tensor
                self.params = self.params.detach()
                self.params.requires_grad = requires_grad
        else:
            # No computation graph was generated so requires_grad can be safely set
            self.params.requires_grad = requires_grad

    def from_mat(cls, mat: torch.Tensor, dtype: torch.dtype = None, device: Union[torch.device, str] = None,
                 requires_grad: bool = False):
        """ Constructs backend from given (C, 4, 4) view matrix. """
        params = cls.convert_from_mat(mat)
        return cls(params, dtype, device, requires_grad)

    def update(self, mat: torch.Tensor):
        """ Updates the underlying representation by mapping the 4x4 view matrix to representation space """
        self.params = self.convert_from_mat(mat)

    def convert_to_mat(self) -> torch.Tensor:
        """ Converts the underlying representation to view-matrix form of shape (C, 4, 4) """
        pass

    def convert_from_mat(cls, mat: torch.Tensor) -> torch.Tensor:
        """ Converts a view-matrix to the underlying representation form of shape (C, K) where K is the number
        of representation parameters.
        """
        pass

    def param_idx(cls, param: ExtrinsicsParamsDefEnum):
        """ Returns the indices of elements in the 'self.params' field of instances of this class, which belong
        under the ExtrinsicsParamsDefEnum param argument.
        i.e: For ExtrinsicsParamsDefEnum.R, a 4x4 matrix representation will return the 9 indices of
        the camera axes R component.
        """
        pass

    def __getitem__(self, item):
        """ :return a backend of the same type holding a subset of the cameras managed by this batched object
        """
        params = self.params[item]
        # Re-add the batch dim when indexing collapsed it (e.g. integer index).
        if params.ndim < self.params.ndim:
            params = params.unsqueeze(0)
        entry = type(self)(params, dtype=self.dtype, device=self.device, requires_grad=self.requires_grad)
        return entry

    def to(self, *args, **kwargs):
        """ Cast to a different device / dtype """
        converted_params = self.params.to(*args, **kwargs)
        # No-op conversions return this very backend instance.
        if self.params.device == converted_params.device and self.params.dtype == converted_params.dtype:
            return self
        else:
            return type(self)(converted_params)

    def device(self) -> torch.device:
        """ :return the torch device of parameters tensor """
        return self.params.device

    def dtype(self) -> torch.dtype:
        # the torch dtype of the parameters tensor
        return self.params.dtype

    def requires_grad(self) -> bool:
        # True when the params tensor participates in autograd
        return self.params.requires_grad

    def requires_grad(self, val: bool):
        # NOTE(review): duplicate name with the getter above — presumably a
        # stripped @requires_grad.setter; confirm against upstream.
        self.params.requires_grad = val

    def __len__(self) -> int:
        # Number of cameras in this batched backend (leading dim of params).
        return self.params.shape[0]

    def backend_name(cls) -> str:
        # Unique registry name of this backend class (see register_backend).
        pass
The provided code snippet includes necessary dependencies for implementing the `register_backend` function. Write a Python function `def register_backend(backend_class: Type[ExtrinsicsRep])` to solve the following problem:
Registers a representation backend class with a unique name. CameraExtrinsics can switch between registered representations dynamically (see switch_backend()).
Here is the function:
def register_backend(backend_class: Type[ExtrinsicsRep]):
    """Registers a representation backend class under its unique name.

    CameraExtrinsics can switch between registered representations dynamically
    (see switch_backend()). Returns the class unchanged, so this function can
    be used as a class decorator.
    """
    name = backend_class.backend_name()
    _REGISTERED_BACKENDS[name] = backend_class
    return backend_class
4,646 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type, Dict, List, Union, Sequence
from enum import IntEnum
import functools
import copy
import torch
from torch.types import _float, _bool
_HANDLED_TORCH_FUNCTIONS = dict()
The provided code snippet includes necessary dependencies for implementing the `implements` function. Write a Python function `def implements(torch_function)` to solve the following problem:
Registers a torch function override for CameraIntrinsics
Here is the function:
def implements(torch_function):
    """Registers a torch function override for CameraIntrinsics"""
    @functools.wraps(torch_function)
    def register(func):
        # Record `func` as the handler dispatched for `torch_function`.
        _HANDLED_TORCH_FUNCTIONS[torch_function] = func
        return func
    return register
4,647 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type, Dict, List, Union, Sequence
from enum import IntEnum
import functools
import copy
import torch
from torch.types import _float, _bool
The provided code snippet includes necessary dependencies for implementing the `up_to_homogeneous` function. Write a Python function `def up_to_homogeneous(vectors: torch.Tensor)` to solve the following problem:
Up-projects vectors to homogeneous coordinates of four dimensions. If the vectors are already in homogeneous coordinates, this function return the inputs. Args: vectors (torch.Tensor): the inputs vectors to project, of shape :math:`(..., 3)` Returns: (torch.Tensor): The projected vectors, of same shape than inputs but last dim to be 4
Here is the function:
def up_to_homogeneous(vectors: torch.Tensor):
    """Up-projects vectors to homogeneous coordinates of four dimensions.

    Vectors already in homogeneous coordinates are returned unchanged.

    Args:
        vectors (torch.Tensor):
            the input vectors to project, of shape :math:`(..., 3)`

    Returns:
        (torch.Tensor): The projected vectors, of same shape as inputs but with last dim 4
    """
    if vectors.shape[-1] != 4:
        # Append a w=1 coordinate, matching the input's batch shape and dtype.
        w_column = torch.ones_like(vectors[..., 0:1])
        vectors = torch.cat([vectors, w_column], dim=-1)
    return vectors
4,648 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type, Dict, List, Union, Sequence
from enum import IntEnum
import functools
import copy
import torch
from torch.types import _float, _bool
The provided code snippet includes necessary dependencies for implementing the `down_from_homogeneous` function. Write a Python function `def down_from_homogeneous(homogeneous_vectors: torch.Tensor)` to solve the following problem:
(1) Performs perspective division by dividing each vector by its w coordinate. (2) Down-projects vectors from 4D homogeneous space to 3D space. Args: homogeneous_vectors: the input vectors, of shape :math:`(..., 4)` Returns: (torch.Tensor): the 3D vectors, of same shape as the inputs but with last dim 3
Here is the function:
def down_from_homogeneous(homogeneous_vectors: torch.Tensor):
    """(1) Performs perspective division by dividing each vector by its w coordinate.
    (2) Down-projects vectors from 4D homogeneous space to 3D space.

    Args:
        homogeneous_vectors: the input vectors, of shape :math:`(..., 4)`

    Returns:
        (torch.Tensor): the 3D vectors, of same shape as inputs but with last dim 3
    """
    w = homogeneous_vectors[..., -1:]   # Perspective divisor (kept as a trailing dim for broadcasting)
    return homogeneous_vectors[..., :-1] / w
4,649 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type, Dict, List, Union, Sequence
from enum import IntEnum
import functools
import copy
import torch
from torch.types import _float, _bool
class CameraIntrinsics(ABC):
r"""Holds the intrinsics parameters of a camera: how it should project from camera space to
normalized screen / clip space.
The instrinsics are determined by the camera type, meaning parameters may differ according to the lens structure.
Typical computer graphics systems commonly assume the intrinsics of a pinhole camera (see: :class:`PinholeIntrinsics` class).
One implication is that some camera types do not use a linear projection (i.e: Fisheye lens).
There are therefore numerous ways to use CameraIntrinsics subclasses:
1. Access intrinsics parameters directly.
This may typically benefit use cases such as ray generators.
2. The :func:`transform()` method is supported by all CameraIntrinsics subclasses,
both linear and non-linear transformations, to project vectors from camera space to normalized screen space.
This method is implemented using differential pytorch operations.
3. Certain CameraIntrinsics subclasses which perform linear projections, may expose the transformation matrix
via dedicated methods.
For example, :class:`PinholeIntrinsics` exposes a :func:`projection_matrix()` method.
This may typically be useful for rasterization based rendering pipelines (i.e: OpenGL vertex shaders).
This class is batched and may hold information from multiple cameras.
Parameters are stored as a single tensor of shape :math:`(\text{num_cameras}, K)` where K is the number of
intrinsic parameters.
"""
def __init__(self, width: int, height: int, params: torch.Tensor, near: float, far: float):
# Make batched
if params.ndim == 1:
params = params.unsqueeze(0)
self.params: torch.Tensor = params # Buffer for camera intrinsic params, shape (C, K)
# _shared_fields ensures that views created on this instance will mirror any changes back
# These fields can be accessed as simple properties
self._shared_fields = dict(
width=width, # Screen resolution (x), int
height=height, # Screen resolution (y), int
near=float(near), # Near clipping plane, float
far=float(far), # Far clipping plane, float
ndc_min=-1.0, # Min value of NDC space
ndc_max=1.0 # Max value of NDC space
)
def aspect_ratio(self) -> float:
"""Returns the aspect ratio of the cameras held by this object."""
return self.width / self.height
def projection_matrix(self):
raise NotImplementedError('This projection of this camera type is non-linear in homogeneous coordinates '
'and therefore does not support a projection matrix. Use self.transform() instead.')
def viewport_matrix(self, vl=0, vr=None, vb=0, vt=None, min_depth=0.0, max_depth=1.0) -> torch.Tensor:
r"""Constructs a viewport matrix which transforms coordinates from NDC space to pixel space.
This is the general matrix form of glViewport, familiar from OpenGL.
NDC coordinates are expected to be in:
* [-1, 1] for the (x,y) coordinates.
* [ndc_min, ndc_max] for the (z) coordinate.
Pixel coordinates are in:
* [vl, vr] for the (x) coordinate.
* [vb, vt] for the (y) coordinate.
* [0, 1] for the (z) coordinate (yielding normalized depth).
When used in conjunction with a :func:`projection_matrix()`, a transformation from camera view space to
window space can be obtained.
Note that for the purpose of rendering with OpenGL shaders, this matrix is not required, as viewport
transformation is already applied by the hardware.
By default, this matrix assumes the NDC screen spaces have the y axis pointing up.
Under this assumption, and a [-1, 1] NDC space,
the default values of this method are compatible with OpenGL glViewport.
.. seealso::
glViewport() at https://registry.khronos.org/OpenGL-Refpages/gl4/html/glViewport.xhtml
and https://en.wikibooks.org/wiki/GLSL_Programming/Vertex_Transformations#Viewport_Transformation
projection_matrix() which converts coordinates from camera view space to NDC space.
.. note::
1. This matrix changes form depending on the NDC space used.
2. Returned values are floating points, rather than integers
(thus this method is compatible with antialising ops).
Args:
vl (int): Viewport left (pixel coordinates x) - where the viewport starts. Default is 0.
vr (int): Viewport right (pixel coordinates x) - where the viewport ends. Default is camera width.
vb (int): Viewport bottom (pixel coordinates y) - where the viewport starts. Default is 0.
vt (int): Viewport top (pixel coordinates y) - where the viewport ends. Default is camera height.
min_depth (float): Minimum of output depth range. Default is 0.0.
max (float): Maximum of output depth range. Default is 1.0.
Returns:
(torch.Tensor): the viewport matrix, of shape :math:`(1, 4, 4)`.
"""
if vr is None:
vr = self.width
if vt is None:
vt = self.height
vl = float(vl)
vr = float(vr)
vb = float(vb)
vt = float(vt)
# From NDC space
ndc_min_x = -1.0
ndc_min_y = -1.0
ndc_min_z = self.ndc_min
ndc_max_x = 1.0
ndc_max_y = 1.0
ndc_max_z = self.ndc_max
ndc_width = ndc_max_x - ndc_min_x # All ndc spaces assume x clip coordinates in [-1, 1]
ndc_height = ndc_max_y - ndc_min_y # All ndc spaces assume y clip coordinates in [-1, 1]
ndc_depth = ndc_max_z - ndc_min_z # NDC depth range, this is NDC space dependent
# To screen space
vw = vr - vl # Viewport width
vh = vt - vb # Viewport height
out_depth_range = max_depth - min_depth # By default, normalized depth is assumed [0, 1]
# Recall that for OpenGL NDC space and full screen viewport, the following matrix is given,
# where vw, vh stand for screen width and height:
# [vw/2, 0.0, 0.0, vw/2] @ [ x ] = .. perspective = [(x/w + 1) * (vw/2)]
# [0.0, vh/2, 0.0, vh/2] [ y ] division [(y/w + 1) * (vh/2)]
# [0.0, 0.0, 1/2, 1/2] [ z ] ------------> [(z/w + 1) / 2]
# [0.0, 0.0, 0.0, 1.0] [ w ] (/w) [ 1.0 ]
# The matrix is non differentiable, as viewport coordinates are a fixed standard set by the graphics api
ndc_mat = self.params.new_tensor([
[vw / ndc_width, 0.0, 0.0, -(ndc_min_x / ndc_width) * vw + vl],
[0.0, vh / ndc_height, 0.0, -(ndc_min_y / ndc_height) * vh + vb],
[0.0, 0.0, out_depth_range / ndc_depth, -(ndc_min_z / ndc_depth) * out_depth_range + min_depth],
[0.0, 0.0, 0.0, 1.0]
])
# Add batch dim, to allow broadcasting
return ndc_mat.unsqueeze(0)
def transform(self, vectors: torch.Tensor) -> torch.Tensor:
    r"""Projects the vectors from view space / camera space to NDC (normalized device coordinates) space.

    The NDC space used by kaolin is a left-handed coordinate system which uses OpenGL conventions::

        Y      Z
        ^    /
        |  /
        |---------> X

    The coordinates returned by this class are not concerned with clipping, and therefore the range
    of values returned by this transformation is not numerically bounded between :math:`[-1, 1]`.
    To support a wide range of lens, this function is compatible with both linear or non-linear
    transformations (which are not representable by matrices).
    CameraIntrinsics subclasses should always implement this method using pytorch differential operations.

    Args:
        vectors (torch.Tensor):
            the vectors to be transformed,
            can be homogeneous of shape :math:`(\text{num_vectors}, 4)`
            or :math:`(\text{num_cameras}, \text{num_vectors}, 4)`
            or non-homogeneous of shape :math:`(\text{num_vectors}, 3)`
            or :math:`(\text{num_cameras}, \text{num_vectors}, 3)`

    Returns:
        (torch.Tensor): the transformed vectors, of same shape as ``vectors`` but with last dim 3

    Raises:
        NotImplementedError: always; the projection math is lens-specific and lives in subclasses.
    """
    raise NotImplementedError
def param_types(cls) -> Type[IntrinsicsParamsDefEnum]:
    """Describes the intrinsic parameters managed by this subclass.

    NOTE(review): takes ``cls`` — presumably decorated with ``@classmethod`` in the
    full source; the decorator is not visible in this chunk.

    Returns:
        (IntrinsicsParamsDefEnum):
            an enum describing each of the intrinsic parameters managed by the subclass.
            This enum also defines the order in which values are kept within the params buffer.

    Raises:
        NotImplementedError: always; each lens subclass declares its own parameter set.
    """
    raise NotImplementedError
def param_count(self) -> int:
    """Number of intrinsic parameters managed per camera.

    Returns:
        (int): number of intrinsic parameters managed per camera
    """
    # One buffer column exists per member of the params definition enum.
    params_def = self.param_types()
    return len(params_def)
def named_params(self) -> List[Dict[str, float]]:
    """Get a descriptive list of named parameters per camera.

    Returns:
        (list of dict): one dict per camera, mapping each intrinsic parameter
        name (from the params definition enum) to its scalar value.
    """
    names = self.param_types()._member_names_
    # One entry per camera row in the params buffer, keyed by the enum member names.
    return [
        {name: self.params[cam_idx, col].item() for col, name in enumerate(names)}
        for cam_idx in range(len(self))
    ]
def _allocate_params(cls, *args,
                     num_cameras: int = 1,
                     device: Union[torch.device, str] = None,
                     dtype: torch.dtype = default_dtype) -> torch.Tensor:
    r"""Allocates the intrinsic parameters buffer of a single camera as a torch tensor.

    The single row of values in ``args`` is replicated for every camera, yielding a
    buffer of shape :math:`(\text{num_cameras}, \text{len(args)})`.
    NOTE(review): presumably a ``@classmethod``; decorator not visible in this chunk.

    Args:
        *args: the values to be kept on the buffer, one per intrinsic parameter,
            in the order defined by ``cls.param_types()``.
        num_cameras (optional, int): the number of cameras to allocate for. Default: 1
        device (optional, str or torch.device):
            the torch device on which the parameters tensor should be allocated.
            Default: cpu
        dtype (optional, torch.dtype): dtype of the buffer. Default: module-level ``default_dtype``.

    Returns:
        (torch.Tensor): the allocated params tensor
    """
    # Verify content matches the subclass params enum definition
    assert len(args) == len(cls.param_types())
    params = torch.tensor(args, device=device, dtype=dtype)
    # Replicate the single row of values once per camera in the batch.
    params = params.unsqueeze(0)
    params = params.repeat(num_cameras, 1)
    return params
def _set_param(self, val: Union[float, torch.Tensor], param_idx: IntrinsicsParamsDefEnum):
r"""Writes a value to the intrinsics parameters buffer of all cameras.
Args:
val (float or torch.Tensor): the new value to set in the intrinsics parameters buffer.
If val is a float or a scalar tensor, this value will be set to all cameras.
If val is a 1D torch.Tensor of size :math:`\text{num_cameras}`,
each camera tracked by this class will be updated with the corresponding value.
param_idx (IntrinsicsParamsDefEnum): index of the parameter to be set
"""
if isinstance(val, float) or isinstance(val, int): # All cameras use same value
self.params[:, param_idx] = torch.full_like(self.params[:, 0], val) # TODO(operel): can just use: =val
elif val.ndim == 0: # All cameras use same value
self.params[:, param_idx] = val.unsqueeze(0).repeat(len(self), 1) # TODO(operel): can just use broadcast
elif val.ndim == 1: # Each camera set with different value
self.params[:, param_idx] = val
def zoom(self, amount):
    r"""Applies a zoom on the camera by adjusting the lens.

    Args:
        amount: Amount of adjustment. The units are lens-specific
            (subclasses define the exact semantics).

    Raises:
        NotImplementedError: always; lens subclasses implement the actual adjustment.
    """
    raise NotImplementedError
def to(self, *args, **kwargs) -> CameraIntrinsics:
    """An instance of this object with the parameters tensor on the given device.

    If the specified device (and dtype) is the same as this object's, this object
    is returned as-is. Otherwise a new object holding a copy of the parameters
    tensor on the requested device is created.

    .. seealso::
        :func:`torch.Tensor.to`
    """
    converted = self.params.to(*args, **kwargs)
    unchanged = (converted.dtype == self.params.dtype
                 and converted.device == self.params.device)
    if unchanged:
        # torch.Tensor.to is a no-op in this case; avoid a needless deep copy.
        return self
    clone = copy.deepcopy(self)
    clone.params = converted
    return clone
def gradient_mask(self, *args: Union[str, IntrinsicsParamsDefEnum]) -> torch.Tensor:
    """Creates a gradient mask, which allows to backpropagate only through params designated as trainable.

    This function does not consider the requires_grad field when creating this mask.

    Args:
        *args: A vararg list of the intrinsic params that should allow gradient flow.
            This function also supports conversion of params from their string names.
            (i.e: 'focal_x' will convert to ``PinholeParamsDefEnum.focal_x``)

    Returns:
        (torch.BoolTensor): mask of same shape as ``self.params``,
        True at columns where gradients should flow.

    Raises:
        ValueError: if an arg does not name a parameter of this intrinsics type.

    Example:
        >>> mask = intrinsics.gradient_mask('focal_x', 'focal_y')
        >>> intrinsics.params.register_hook(lambda grad: grad * mask.float())
        >>> # intrinsics will now allow gradient flow only for focal_x and focal_y.
    """
    # Enum type for this kind of intrinsics, used to convert str args to enum members
    param_def_enum = self.param_types()
    try:
        params = [param_def_enum[a] if isinstance(a, str) else a for a in args]
    except KeyError as e:
        # Fixed: the previous message referenced set_trainable_params(), the wrong function name.
        raise ValueError(f'Camera\'s gradient_mask() received an unsupported arg: {e}')
    mask = torch.zeros_like(self.params).bool()
    for param in params:
        mask[:, param.value] = True  # Boolean mask: assign True rather than the float 1.0
    return mask
def clip_mask(self, depth: torch.Tensor) -> torch.BoolTensor:
    r"""Creates a boolean mask for clipping depth values which fall out of the view frustum.

    Args:
        depth (torch.Tensor): depth values

    Returns:
        (torch.BoolTensor):
            a mask, marking whether ``depth`` values are within the view frustum or not,
            of same shape as ``depth``.
    """
    # min/max guard against cameras configured with near > far.
    lo = min(self.near, self.far)
    hi = max(self.near, self.far)
    return depth.ge(lo) & depth.le(hi)
def lens_type(self) -> str:
    """Short string identifier for the lens type of this intrinsics subclass.

    Raises:
        NotImplementedError: always; subclasses return their own identifier.
    """
    raise NotImplementedError
def device(self) -> torch.device:
    """the torch device of parameters tensor"""
    # Returns a torch.device object (the previous `-> str` annotation was inaccurate).
    return self.params.device
def dtype(self) -> torch.dtype:
    """the torch dtype of parameters tensor"""
    return self.params.dtype
def requires_grad(self) -> bool:
    """True if the current intrinsics object allows gradient flow.

    NOTE(review): getter/setter pair — presumably decorated with ``@property``
    in the full source; decorators not visible in this chunk.
    """
    return self.params.requires_grad
def requires_grad(self, val: bool):
    """Toggle gradient flow for the intrinsics.

    Args:
        val (bool): if True, autograd will record operations on the params tensor.
    """
    self.params.requires_grad = val
def parameters(self) -> torch.Tensor:
    """Direct access to the raw intrinsics buffer.

    Returns:
        (torch.Tensor): the intrinsics parameters buffer
    """
    return self.params
def cpu(self) -> CameraIntrinsics:
    """Returns this intrinsics object with the params tensor on the CPU (see :func:`to`)."""
    return self.to('cpu')

def cuda(self) -> CameraIntrinsics:
    """Returns this intrinsics object with the params tensor on the default CUDA device (see :func:`to`)."""
    return self.to('cuda')

def half(self) -> CameraIntrinsics:
    """Returns this intrinsics object with the params tensor cast to float16 (see :func:`to`)."""
    return self.to(torch.float16)

def float(self) -> CameraIntrinsics:
    """Returns this intrinsics object with the params tensor cast to float32 (see :func:`to`)."""
    return self.to(torch.float32)

def double(self) -> CameraIntrinsics:
    """Returns this intrinsics object with the params tensor cast to float64 (see :func:`to`)."""
    return self.to(torch.float64)
def cat(cls, cameras: Sequence[CameraIntrinsics]):
    """Concatenate multiple CameraIntrinsics's.

    Assumes all cameras use the same width, height, near and far planes.

    Args:
        cameras (Sequence of CameraIntrinsics): the cameras to concatenate.

    Returns:
        (CameraIntrinsics): The concatenated cameras as a single CameraIntrinsics,
        or None when ``cameras`` is empty.
    """
    if not cameras:
        return None
    # Clone the first camera's shared fields, then stack all params buffers along the batch dim.
    merged = copy.deepcopy(cameras[0])
    merged.params = torch.cat([camera.params for camera in cameras], dim=0)
    return merged
def set_ndc_range(self, ndc_min, ndc_max):
    """Sets the NDC depth range shared by the cameras in this batch.

    .. warning::
        This method is not implemented

    Args:
        ndc_min (float): desired minimum of the NDC depth range.
        ndc_max (float): desired maximum of the NDC depth range.

    Raises:
        NotImplementedError: always, until non-default NDC ranges are supported.
    """
    # TODO(operel): comment out after properly testing for next version
    raise NotImplementedError('Currently only NDC space of [-1, 1] is supported.')
    # self._shared_fields['ndc_min'] = ndc_min
    # self._shared_fields['ndc_max'] = ndc_max
def __getitem__(self, item) -> CameraIntrinsics:
"""Indexes a specific camera from the batch of cameras tracked by this class.
Args:
item (int or slice): Zero based camera index.
Returns:
(CameraIntrinsics):
A new instance of this class viewing a single camera.
The returned instance will track a parameters tensor, of shape :math:`(M, K)`,
where K is the number of intrinsic parameters and M is the length of item.
The parameters tensor of the new instance is a view of the current object parameters,
and therefore changes to either will be reflected in both.
"""
shallow = copy.copy(self) # Gather all non-param fields
params = self.params[item]
if params.ndim < self.params.ndim:
params = params.unsqueeze(0)
shallow.params = params
return shallow
def __len__(self) -> int:
    """Returns the number of cameras tracked by this object."""
    return self.params.shape[0]
def __str__(self) -> str:
named_params = self.named_params()
title = f"{type(self).__name__} of {len(self)} cameras of resolution {self.width}x{self.height}.\n"
entries = [f"Camera #{cam_idx}: {cam_params}\n" for cam_idx, cam_params in enumerate(named_params)]
return ''.join([title] + entries)
def __repr__(self) -> str:
    """Terse debug representation: class name, resolution and raw params tensor."""
    return f"{type(self).__name__} of {self.width}x{self.height}, params: {self.params.__repr__()}"
def __torch_function__(cls, func, types, args=(), kwargs=None):
    """Dispatch hook letting selected torch functions accept CameraIntrinsics arguments.

    Only functions registered in the module-level ``_HANDLED_TORCH_FUNCTIONS`` table
    are supported; anything else returns ``NotImplemented`` so torch can fall back
    to other handlers.
    NOTE(review): presumably decorated with ``@classmethod``; not visible in this chunk.
    """
    if kwargs is None:
        kwargs = {}
    # Refuse mixed-type calls involving types we don't know how to handle.
    if func not in _HANDLED_TORCH_FUNCTIONS or not all(
        issubclass(t, (torch.Tensor, CameraIntrinsics))
        for t in types
    ):
        return NotImplemented
    return _HANDLED_TORCH_FUNCTIONS[func](*args, **kwargs)
# NOTE(review): the getter/setter pairs below are presumably decorated with
# @property / @<name>.setter in the full source; decorators are not visible in this chunk.
# All of these read/write the _shared_fields dict common to every camera in the batch.

def width(self) -> int:
    """Viewport width in pixels, shared by all cameras in the batch."""
    return self._shared_fields['width']

def width(self, value: int) -> None:
    """Sets the viewport width in pixels for all cameras in the batch."""
    self._shared_fields['width'] = value

def height(self) -> int:
    """Viewport height in pixels, shared by all cameras in the batch."""
    return self._shared_fields['height']

def height(self, value: int) -> None:
    """Sets the viewport height in pixels for all cameras in the batch."""
    self._shared_fields['height'] = value

def near(self) -> float:
    """Distance of the near clipping plane, shared by all cameras in the batch."""
    return self._shared_fields['near']

def near(self, value: float) -> None:
    """Sets the near clipping plane distance for all cameras in the batch."""
    self._shared_fields['near'] = value

def far(self) -> float:
    """Distance of the far clipping plane, shared by all cameras in the batch."""
    return self._shared_fields['far']

def far(self, value: float) -> None:
    """Sets the far clipping plane distance for all cameras in the batch."""
    self._shared_fields['far'] = value

def ndc_min(self) -> float:
    """Minimum of the NDC depth range (read-only; see :func:`set_ndc_range`)."""
    return self._shared_fields['ndc_min']

def ndc_max(self) -> float:
    """Maximum of the NDC depth range (read-only; see :func:`set_ndc_range`)."""
    return self._shared_fields['ndc_max']
The provided code snippet includes necessary dependencies for implementing the `allclose` function. Write a Python function `def allclose(input: CameraIntrinsics, other: CameraIntrinsics, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool` to solve the following problem:
:func:`torch.allclose` compatibility implementation for CameraIntrinsics. Args: input (Camera): first camera to compare other (Camera): second camera to compare atol (float, optional): absolute tolerance. Default: 1e-08 rtol (float, optional): relative tolerance. Default: 1e-05 equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` Returns: (bool): Result of the comparison
Here is the function:
def allclose(input: CameraIntrinsics, other: CameraIntrinsics, rtol: _float = 1e-05, atol: _float = 1e-08,
             equal_nan: _bool = False) -> _bool:
    """:func:`torch.allclose` compatibility implementation for CameraIntrinsics.

    Args:
        input (Camera): first camera to compare
        other (Camera): second camera to compare
        atol (float, optional): absolute tolerance. Default: 1e-08
        rtol (float, optional): relative tolerance. Default: 1e-05
        equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal.
            Default: ``False``

    Returns:
        (bool): Result of the comparison
    """
    # Different intrinsics subclasses are never considered close.
    if type(input) is not type(other):
        return False
    same_resolution = input.width == other.width and input.height == other.height
    params_close = torch.allclose(input.params, other.params, rtol=rtol, atol=atol, equal_nan=equal_nan)
    return params_close and same_resolution
4,650 | import warnings
from kaolin import _C
import torch
The provided code snippet includes necessary dependencies for implementing the `unbatched_raytrace` function. Write a Python function `def unbatched_raytrace(octree, point_hierarchy, pyramid, exsum, origin, direction, level, return_depth=True, with_exit=False)` to solve the following problem:
r"""Apply ray tracing over an unbatched SPC structure. The SPC model will be always normalized between -1 and 1 for each axis. Args: octree (torch.ByteTensor): the octree structure, of shape :math:`(\text{num_bytes})`. point_hierarchy (torch.ShortTensor): the point hierarchy associated to the octree, of shape :math:`(\text{num_points}, 3)`. pyramid (torch.IntTensor): the pyramid associated to the octree, of shape :math:`(2, \text{max_level} + 2)`. exsum (torch.IntTensor): the prefix sum associated to the octree. of shape :math:`(\text{num_bytes} + \text{batch_size})`. origin (torch.FloatTensor): the origins of the rays, of shape :math:`(\text{num_rays}, 3)`. direction (torch.FloatTensor): the directions of the rays, of shape :math:`(\text{num_rays}, 3)`. level (int): level to use from the octree. return_depth (bool): return the depth of each voxel intersection. (Default: True) with_exit (bool): return also the exit intersection depth. (Default: False) Returns: (torch.IntTensor, torch.IntTensor, (optional) torch.FloatTensor): - Ray index of intersections sorted by depth of shape :math:`(\text{num_intersection})` - Point hierarchy index of intersections sorted by depth of shape :math:`(\text{num_intersection})` These indices will be `IntTensor`s, but they can be used for indexing with `torch.index_select`. - If return_depth is true: Float tensor of shape :math:`(\text{num_intersection}), 1` of entry depths to each AABB intersection. When `with_exit` is set, returns shape :math:`(\text{num_intersection}), 2` of entry and exit depths.
Here is the function:
def unbatched_raytrace(octree, point_hierarchy, pyramid, exsum, origin, direction, level,
                       return_depth=True, with_exit=False):
    r"""Apply ray tracing over an unbatched SPC structure.

    The SPC model will be always normalized between -1 and 1 for each axis.

    Args:
        octree (torch.ByteTensor): the octree structure,
            of shape :math:`(\text{num_bytes})`.
        point_hierarchy (torch.ShortTensor): the point hierarchy associated to the octree,
            of shape :math:`(\text{num_points}, 3)`.
        pyramid (torch.IntTensor): the pyramid associated to the octree,
            of shape :math:`(2, \text{max_level} + 2)`.
        exsum (torch.IntTensor): the prefix sum associated to the octree.
            of shape :math:`(\text{num_bytes} + \text{batch_size})`.
        origin (torch.FloatTensor): the origins of the rays,
            of shape :math:`(\text{num_rays}, 3)`.
        direction (torch.FloatTensor): the directions of the rays,
            of shape :math:`(\text{num_rays}, 3)`.
        level (int): level to use from the octree.
        return_depth (bool): return the depth of each voxel intersection. (Default: True)
        with_exit (bool): return also the exit intersection depth. (Default: False)

    Returns:
        (torch.IntTensor, torch.IntTensor, (optional) torch.FloatTensor):

            - Ray index of intersections sorted by depth of shape :math:`(\text{num_intersection})`
            - Point hierarchy index of intersections sorted by depth of shape
              :math:`(\text{num_intersection})`
              These indices will be `IntTensor`s, but they can be used for indexing
              with `torch.index_select`.
            - If return_depth is true:
              Float tensor of shape :math:`(\text{num_intersection}), 1` of entry
              depths to each AABB intersection. When `with_exit` is set, returns
              shape :math:`(\text{num_intersection}), 2` of entry and exit depths.
    """
    # Delegate to the CUDA kernel; tensors are made contiguous for the C++ bindings.
    output = _C.render.spc.raytrace_cuda(
        octree.contiguous(),
        point_hierarchy.contiguous(),
        pyramid.contiguous(),
        exsum.contiguous(),
        origin.contiguous(),
        direction.contiguous(),
        level,
        return_depth,
        with_exit)
    # output[0] packs (ray index, point-hierarchy index) pairs ("nuggets").
    nuggets = output[0]
    ray_index = nuggets[..., 0]
    point_index = nuggets[..., 1]
    if return_depth:
        # output[1] holds the per-intersection depth(s) computed by the kernel.
        return ray_index, point_index, output[1]
    else:
        return ray_index, point_index
4,651 | import warnings
from kaolin import _C
import torch
def mark_pack_boundaries(pack_ids):
    r"""Mark the boundaries of pack IDs.

    Pack IDs are sorted tensors which mark the ID of the pack each element belongs in.
    For example, the SPC ray trace kernel will return the ray index tensor which marks the ID of the ray
    that each intersection belongs in. This kernel will mark the beginning of each of those packs of
    intersections with a boolean mask (true where the beginning is).

    Args:
        pack_ids (torch.Tensor): pack ids of shape :math:`(\text{num_elems})`
            This can be any integral (n-bit integer) type.

    Returns:
        first_hits (torch.BoolTensor): the boolean mask marking the boundaries.

    Examples:
        >>> pack_ids = torch.IntTensor([1,1,1,1,2,2,2]).to('cuda:0')
        >>> mark_pack_boundaries(pack_ids)
        tensor([ True, False, False, False,  True, False, False], device='cuda:0')
    """
    # The CUDA kernel returns integer flags; cast to a boolean mask.
    return _C.render.spc.mark_pack_boundaries_cuda(pack_ids.contiguous()).bool()
The provided code snippet includes necessary dependencies for implementing the `mark_first_hit` function. Write a Python function `def mark_first_hit(ridx)` to solve the following problem:
r"""Mark the first hit in the nuggets. .. deprecated:: 0.10.0 This function is deprecated. Use :func:`mark_pack_boundaries`. The nuggets are a packed tensor containing correspondences from ray index to point index, sorted within each ray pack by depth. This will mark true for each first hit (by depth) for a pack of nuggets. Returns: first_hits (torch.BoolTensor): the boolean mask marking the first hit by depth.
Here is the function:
def mark_first_hit(ridx):
    r"""Mark the first hit in the nuggets.

    .. deprecated:: 0.10.0
        This function is deprecated. Use :func:`mark_pack_boundaries`.

    The nuggets are a packed tensor containing correspondences from ray index to point index, sorted
    within each ray pack by depth. This will mark true for each first hit (by depth) for a pack of
    nuggets.

    Args:
        ridx (torch.Tensor): ray index tensor, sorted per pack, of shape :math:`(\text{num_elems})`.

    Returns:
        first_hits (torch.BoolTensor): the boolean mask marking the first hit by depth.
    """
    # Thin deprecation shim: warn once per call site, then delegate.
    warnings.warn("mark_first_hit has been deprecated, please use mark_pack_boundaries instead")
    return mark_pack_boundaries(ridx)
4,652 | import warnings
from kaolin import _C
import torch
The provided code snippet includes necessary dependencies for implementing the `diff` function. Write a Python function `def diff(feats, boundaries)` to solve the following problem:
r"""Find the delta between each of the features in a pack. The deltas are given by `out[i] = feats[i+1] - feats[i]` The behavior is similar to :func:`torch.diff` for non-packed tensors, but :func:`torch.diff` will reduce the number of features by 1. This function will instead populate the last diff with 0. Args: feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})` boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})` Given some index array marking the pack IDs, the boundaries can be calculated with :func:`mark_pack_boundaries` Returns: (torch.FloatTensor): diffed features of shape :math:`(\text{num_rays}, \text{num_feats})`
Here is the function:
def diff(feats, boundaries):
    r"""Find the delta between each of the features in a pack.

    The deltas are given by `out[i] = feats[i+1] - feats[i]`
    The behavior is similar to :func:`torch.diff` for non-packed tensors, but :func:`torch.diff`
    will reduce the number of features by 1. This function will instead populate the last diff with 0.

    Args:
        feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`
        boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`
            Given some index array marking the pack IDs, the boundaries can be calculated with
            :func:`mark_pack_boundaries`

    Returns:
        (torch.FloatTensor): diffed features of shape :math:`(\text{num_rays}, \text{num_feats})`
    """
    feats_shape = feats.shape
    feat_dim = feats.shape[-1]
    # Row indices where each pack starts; the CUDA kernel needs them so diffs
    # are computed per pack rather than across pack boundaries.
    pack_idxes = torch.nonzero(boundaries).contiguous()[..., 0]
    # Flatten to 2D for the kernel, then restore the caller's original shape.
    return _C.render.spc.diff_cuda(feats.reshape(-1, feat_dim).contiguous(), pack_idxes.contiguous()).reshape(*feats_shape)
4,653 | import warnings
from kaolin import _C
import torch
class Cumprod(torch.autograd.Function):
    """Autograd wrapper around the packed cumulative-product CUDA kernels.

    NOTE(review): forward/backward are presumably decorated with ``@staticmethod``
    (required by modern torch.autograd.Function); decorators not visible in this chunk.
    """

    def forward(ctx, feats, info, exclusive, reverse):
        # Row indices where each pack starts (info is the boundary mask).
        nonzero = torch.nonzero(info).int().contiguous()[..., 0]
        prod = _C.render.spc.cumprod_cuda(feats, nonzero, exclusive, reverse)
        # Saved for the backward pass: inputs plus the forward result.
        ctx.save_for_backward(feats, nonzero, prod)
        ctx.flags = (exclusive, reverse)
        return prod

    def backward(ctx, grad_output):
        # Fixed: removed the dead `prod = ctx.saved_tensors` line, which bound the
        # whole saved tuple to `prod` before the unpacking below overwrote it.
        feats, nonzero, prod = ctx.saved_tensors
        exclusive, reverse = ctx.flags
        # The cumprod gradient is expressed via a cumsum running in the opposite direction.
        out = _C.render.spc.cumsum_cuda(prod * grad_output, nonzero, exclusive, not reverse)
        grad_feats = None
        if ctx.needs_input_grad[0]:
            # Approximate gradient (consistent with TensorFlow): 0/0 entries become 0.
            grad_feats = out / feats
            grad_feats[grad_feats.isnan()] = 0
        return grad_feats, None, None, None
The provided code snippet includes necessary dependencies for implementing the `cumprod` function. Write a Python function `def cumprod(feats, boundaries, exclusive=False, reverse=False)` to solve the following problem:
r"""Cumulative product across packs of features. This function is similar to :func:`tf.math.cumprod` with the same options, but for packed tensors. Refer to the TensorFlow docs for numerical examples of the options. Note that the backward gradient follows the same behaviour in TensorFlow, which is to replace NaNs by zeros, which is different from the behaviour in PyTorch. To be safe, add an epsilon to feats which will make the behaviour consistent. Args: feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`. boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`. Given some index array marking the pack IDs, the boundaries can be calculated with :func:`mark_pack_boundaries`. exclusive (bool): Compute exclusive cumprod if true. Exclusive means the current index won't be used for the calculation of the cumulative product. (Default: False) reverse (bool): Compute reverse cumprod if true, i.e. the cumulative product will start from the end of each pack, not from the beginning. (Default: False) Returns: (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
Here is the function:
def cumprod(feats, boundaries, exclusive=False, reverse=False):
    r"""Cumulative product across packs of features.

    This function is similar to :func:`tf.math.cumprod` with the same options, but for packed tensors.
    Refer to the TensorFlow docs for numerical examples of the options.

    Note that the backward gradient follows the same behaviour in TensorFlow, which is to
    replace NaNs by zeros, which is different from the behaviour in PyTorch. To be safe,
    add an epsilon to feats which will make the behaviour consistent.

    Args:
        feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
        boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`.
            Given some index array marking the pack IDs, the boundaries can be calculated with
            :func:`mark_pack_boundaries`.
        exclusive (bool): Compute exclusive cumprod if true. Exclusive means the current index won't be used
            for the calculation of the cumulative product. (Default: False)
        reverse (bool): Compute reverse cumprod if true, i.e. the cumulative product will start from the end of
            each pack, not from the beginning. (Default: False)

    Returns:
        (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
    """
    # Thin functional wrapper over the autograd Function defined above.
    return Cumprod.apply(feats.contiguous(), boundaries.contiguous(), exclusive, reverse)
4,654 | import warnings
from kaolin import _C
import torch
def sum_reduce(feats, boundaries):
    r"""Sum the features of packs.

    Args:
        feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
        boundaries (torch.BoolTensor): bools to mark pack boundaries of shape :math:`(\text{num_rays})`.
            Given some index array marking the pack IDs, the boundaries can be calculated with
            :func:`mark_pack_boundaries`.

    Returns:
        (torch.FloatTensor): summed features of shape :math:`(\text{num_packs}, \text{num_feats})`.
    """
    # Thin functional wrapper over the SumReduce autograd Function (defined elsewhere in this file).
    return SumReduce.apply(feats.contiguous(), boundaries.contiguous())
def cumsum(feats, boundaries, exclusive=False, reverse=False):
    r"""Cumulative sum across packs of features.

    This function is similar to :func:`tf.math.cumsum` with the same options, but for packed tensors.
    Refer to the TensorFlow docs for numerical examples of the options.

    Args:
        feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
        boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`.
            Given some index array marking the pack IDs, the boundaries can be calculated with
            :func:`mark_pack_boundaries`.
        exclusive (bool): Compute exclusive cumsum if true. Exclusive means the current index won't be used
            for the calculation of the cumulative sum. (Default: False)
        reverse (bool): Compute reverse cumsum if true, i.e. the cumulative sum will start from the end of
            each pack, not from the beginning. (Default: False)

    Returns:
        (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
    """
    # Thin functional wrapper over the Cumsum autograd Function (defined elsewhere in this file).
    return Cumsum.apply(feats.contiguous(), boundaries.contiguous(), exclusive, reverse)
The provided code snippet includes necessary dependencies for implementing the `exponential_integration` function. Write a Python function `def exponential_integration(feats, tau, boundaries, exclusive=True)` to solve the following problem:
r"""Exponential transmittance integration across packs using the optical thickness (tau). Exponential transmittance is derived from the Beer-Lambert law. Typical implementations of exponential transmittance is calculated with :func:`cumprod`, but the exponential allows a reformulation as a :func:`cumsum` which its gradient is more stable and faster to compute. We opt to use the :func:`cumsum` formulation. For more details, we recommend "Monte Carlo Methods for Volumetric Light Transport" by Novak et al. Args: feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`. tau (torch.FloatTensor): optical thickness of shape :math:`(\text{num_rays}, 1)`. boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`. Given some index array marking the pack IDs, the boundaries can be calculated with :func:`mark_pack_boundaries`. exclusive (bool): Compute exclusive exponential integration if true. (default: True) Returns: (torch.FloatTensor, torch.FloatTensor) - Integrated features of shape :math:`(\text{num_packs}, \text{num_feats})`. - Transmittance of shape :math:`(\text{num_rays}, 1)`.
Here is the function:
def exponential_integration(feats, tau, boundaries, exclusive=True):
    r"""Exponential transmittance integration across packs using the optical thickness (tau).

    Exponential transmittance is derived from the Beer-Lambert law. Typical implementations of
    exponential transmittance is calculated with :func:`cumprod`, but the exponential allows a reformulation
    as a :func:`cumsum` which its gradient is more stable and faster to compute. We opt to use the
    :func:`cumsum` formulation.

    For more details, we recommend "Monte Carlo Methods for Volumetric Light Transport" by Novak et al.

    Args:
        feats (torch.FloatTensor): features of shape :math:`(\text{num_rays}, \text{num_feats})`.
        tau (torch.FloatTensor): optical thickness of shape :math:`(\text{num_rays}, 1)`.
        boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`.
            Given some index array marking the pack IDs, the boundaries can be calculated with
            :func:`mark_pack_boundaries`.
        exclusive (bool): Compute exclusive exponential integration if true. (default: True)

    Returns:
        (torch.FloatTensor, torch.FloatTensor)
        - Integrated features of shape :math:`(\text{num_packs}, \text{num_feats})`.
        - Transmittance of shape :math:`(\text{num_rays}, 1)`.
    """
    # TODO(ttakikawa): This should be a fused kernel... we're iterating over packs, so might as well
    # also perform the integration in the same manner.
    tau = tau.contiguous()
    boundaries = boundaries.contiguous()
    # Per-sample opacity contribution (Beer-Lambert law).
    alpha = 1.0 - torch.exp(-tau)
    # cumsum reformulation of transmittance (faster and more stable gradients than cumprod).
    transmittance = alpha * torch.exp(-cumsum(tau, boundaries, exclusive=exclusive))
    feats_out = sum_reduce(transmittance * feats.contiguous(), boundaries)
    return feats_out, transmittance
4,655 | import torch
from kaolin import _C
def get_shape_per_tensor(tensor_list):
    r"""Return the per-tensor shapes (all dims except the last) for a list of tensors.

    See shape_per_tensor for :ref:`packed<packed_shape_per_tensor>` or
    :ref:`padded<padded_shape_per_tensor>` for more information.

    Args:
        tensor_list (sequence of torch.Tensor): any python sequence of tensors of
            the identical type, number of dimensions, and last dimension size,
            e.g. :math:`[(H_0, W_0, C), (H_1, W_1, C)]`.

    Returns:
        (torch.Tensor):
            the shape of each subtensor (except for the last dim), of shape
            :math:`(len(\text{tensor_list}), \text{tensor_list[0].ndim} - 1)`.
    """
    try:
        return torch.tensor([t.shape[:-1] for t in tensor_list],
                            dtype=torch.long)
    except ValueError as err:
        # Ragged dimensionality is the usual cause: report the first offender.
        expected_ndim = tensor_list[0].ndim
        for idx, tensor in enumerate(tensor_list):
            if tensor.ndim != expected_ndim:
                raise ValueError(f"Expected all tensors to have {expected_ndim} dimensions "
                                 f"but got {tensor.ndim} at index {idx}")
        raise err  # Unknown error
The provided code snippet includes necessary dependencies for implementing the `list_to_packed` function. Write a Python function `def list_to_packed(tensor_list)` to solve the following problem:
r"""Converts a sequence of torch.Tensor into a single :ref:`packed tensor<packed>`. torch.Tensor of same type, number of dimensions and last dimension size will be reshaped to :math:`(-1, \text{last_dim})` and concatenated on first axis. E.g.: With input of shapes :math:`[(X_0, Y_0, Z_0, C), (X_1, Y_1, Z_1, C)]` the output packed tensor will be of shape :math:`((X_0 * Y_0 * Z_0 + X_1 * Y_1 * Z_1), C)`. The output shape_per_tensor will be the tensor: :math:`[[X_0, Y_0, Z_0], [X_1, Y_1, Z_1]]`. Args: tensor_list (sequence of torch.Tensor): any python sequence of tensors of identical type, number of dimensions, and last dimension size, e.g. :math:`[(H_0, W_0, C), (H_1, W_1, C)]`. Returns: (torch.Tensor, torch.LongTensor): the :ref:`packed tensor<packed>` and the associated :ref:`shape_per_tensor<padded_shape_per_tensor>` Example: >>> a = torch.LongTensor([[0, 1, 2], ... [1, 2, 3]]) >>> b = torch.LongTensor([[2, 4, 5]]) >>> packed_tensor, shape_per_tensor = list_to_packed([a, b]) >>> packed_tensor tensor([[0, 1, 2], [1, 2, 3], [2, 4, 5]]) >>> shape_per_tensor tensor([[2], [1]])
Here is the function:
def list_to_packed(tensor_list):
    r"""Concatenate a sequence of torch.Tensor into a single :ref:`packed tensor<packed>`.

    Every tensor (same type, number of dimensions and last dimension size) is
    flattened to :math:`(-1, \text{last_dim})` and the results are concatenated
    along the first axis. E.g. inputs of shapes
    :math:`[(X_0, Y_0, Z_0, C), (X_1, Y_1, Z_1, C)]` produce a packed tensor of
    shape :math:`((X_0 * Y_0 * Z_0 + X_1 * Y_1 * Z_1), C)` and a shape_per_tensor
    :math:`[[X_0, Y_0, Z_0], [X_1, Y_1, Z_1]]`.

    Args:
        tensor_list (sequence of torch.Tensor): any python sequence of tensors of
            identical type, number of dimensions, and last dimension size,
            e.g. :math:`[(H_0, W_0, C), (H_1, W_1, C)]`.

    Returns:
        (torch.Tensor, torch.LongTensor):
            the :ref:`packed tensor<packed>` and the associated
            :ref:`shape_per_tensor<padded_shape_per_tensor>`.
    """
    shape_per_tensor = get_shape_per_tensor(tensor_list)
    try:
        packed = torch.cat([t.reshape(-1, t.shape[-1]) for t in tensor_list],
                           dim=0)
    except RuntimeError as err:
        # Diagnose the two mismatches torch.cat cannot explain nicely itself.
        last_dim = tensor_list[0].shape[-1]
        t_type = tensor_list[0].type()
        for idx, tensor in enumerate(tensor_list):
            if tensor.shape[-1] != last_dim:
                raise ValueError(f"Expected all tensor to have last dimension {last_dim} "
                                 f"but got {tensor.shape[-1]} at index {idx}")
            if tensor.type() != t_type:
                raise ValueError(f"Expected all tensor to have type {t_type} "
                                 f"but got {tensor.type()} at index {idx}")
        raise err  # Unknown error
    return packed, shape_per_tensor
4,656 | import torch
from kaolin import _C
The provided code snippet includes necessary dependencies for implementing the `packed_to_list` function. Write a Python function `def packed_to_list(packed_tensor, shape_per_tensor, first_idx)` to solve the following problem:
Converts a single packed tensor into a sequence of torch.Tensor. Args: packed_tensor (torch.Tensor): input packed tensor. shape_per_tensor (torch.LongTensor): :ref:`shape_per_tensor<packed_shape_per_tensor>` associated to the packed tensor. first_idx (torch.LongTensor): :ref:`first_idx<packed_first_idx>` associated to the packed tensor. Return: list of torch.Tensor: list of tensor unbatched from packed_tensor Example: >>> packed_tensor = torch.arange(16).reshape(8, 2) >>> packed_tensor tensor([[ 0, 1], [ 2, 3], [ 4, 5], [ 6, 7], [ 8, 9], [10, 11], [12, 13], [14, 15]]) >>> shape_per_tensor = torch.LongTensor([[3], [4], [1]]) >>> first_idx = torch.LongTensor([0, 3, 7, 8]) >>> packed_to_list(packed_tensor, shape_per_tensor, first_idx) [tensor([[0, 1], [2, 3], [4, 5]]), tensor([[ 6, 7], [ 8, 9], [10, 11], [12, 13]]), tensor([[14, 15]])]
Here is the function:
def packed_to_list(packed_tensor, shape_per_tensor, first_idx):
    """Split a single packed tensor back into a list of torch.Tensor.

    Args:
        packed_tensor (torch.Tensor): input packed tensor.
        shape_per_tensor (torch.LongTensor):
            :ref:`shape_per_tensor<packed_shape_per_tensor>` associated to the
            packed tensor.
        first_idx (torch.LongTensor):
            :ref:`first_idx<packed_first_idx>` associated to the packed tensor.

    Return:
        list of torch.Tensor: list of tensor unbatched from packed_tensor
    """
    feat_dim = packed_tensor.shape[-1]
    tensors = []
    # Each subtensor spans [first_idx[i], first_idx[i + 1]) rows of the pack
    # and is reshaped back to its original (unflattened) shape.
    for start, end, shape in zip(first_idx[:-1], first_idx[1:], shape_per_tensor):
        tensors.append(packed_tensor[start:end].reshape(*shape, feat_dim))
    return tensors
4,657 | import torch
from kaolin import _C
def get_shape_per_tensor(tensor_list):
    r"""Return the shape of every tensor in the list, excluding the last dimension.

    See shape_per_tensor for :ref:`packed<packed_shape_per_tensor>` or
    :ref:`padded<padded_shape_per_tensor>` for more information.

    Args:
        tensor_list (sequence of torch.Tensor): any python sequence of tensors of
            the identical type, number of dimensions, and last dimension size,
            e.g. :math:`[(H_0, W_0, C), (H_1, W_1, C)]`.

    Returns:
        (torch.Tensor):
            the shape of each subtensor (except for the last dim), of shape
            :math:`(len(\text{tensor_list}), \text{tensor_list[0].ndim} - 1)`.
    """
    try:
        leading_shapes = [t.shape[:-1] for t in tensor_list]
        return torch.tensor(leading_shapes, dtype=torch.long)
    except ValueError as err:
        # torch.tensor rejects ragged input; point at the mismatching tensor.
        ndim = tensor_list[0].ndim
        for i, t in enumerate(tensor_list):
            if t.ndim != ndim:
                raise ValueError(f"Expected all tensors to have {ndim} dimensions "
                                 f"but got {t.ndim} at index {i}")
        raise err  # Unknown error
def fill_max_shape(shape_per_tensor, partial_max_shape=None):
    r"""Fills partial definition of shape to be at least as big as each shape in shape_per_tensor.

    If the i-th dimension is -1 then the i-th output will be ``shape_per_tensor[:,i].max()``.

    Args:
        shape_per_tensor (torch.Tensor): Input :ref:`shape_per_tensor<packed_shape_per_tensor>`,
            of shape :math:`(\text{N}, \text{ndim})`.
        partial_max_shape (tuple, list or torch.Tensor): partially defined maximum shape,
            of size ``ndim``. Default: fully inferred from ``shape_per_tensor``.

    Returns:
        (torch.Tensor): the max_shape fully defined, of same size than ``partial_max_shape``.

    Raises:
        ValueError: if a fully-specified dimension of ``partial_max_shape`` is smaller
            than the corresponding dimension of some tensor.

    Example:
        >>> partial_max_shape = (6, -1, -1)
        >>> shape_per_tensor = torch.LongTensor([[2, 3, 5],
        ...                                      [3, 4, 2]])
        >>> fill_max_shape(shape_per_tensor, partial_max_shape)
        tensor([6, 4, 5])
    """
    list_max_shape, idx_max_shape = torch.max(shape_per_tensor, dim=0)
    if partial_max_shape is None:
        # Nothing to merge or validate: the column-wise maxima are the answer.
        return list_max_shape
    # Avoid inplace modification of the mutable argument.
    if torch.is_tensor(partial_max_shape):
        max_shape = partial_max_shape.clone()
    else:
        max_shape = torch.LongTensor(partial_max_shape)
    for i, max_dim in enumerate(list_max_shape):
        if max_shape[i] == -1:
            max_shape[i] = max_dim
        elif max_shape[i] < max_dim:
            # Bug fix: the original message had an unbalanced parenthesis
            # ("({max_shape[i]} is smaller").
            raise ValueError(f"dim {i} of max_shape ({max_shape[i]}) is smaller than "
                             f"for tensor {idx_max_shape[i]} ({max_dim})")
    return max_shape
The provided code snippet includes necessary dependencies for implementing the `list_to_padded` function. Write a Python function `def list_to_padded(tensor_list, padding_value, max_shape=None)` to solve the following problem:
r"""Converts a sequence of torch.Tensor into a single :ref:`padded tensor<padded>`. torch.Tensor of same type, number of dimensions and last dimension size will be padded and stacked on first axis. E.g.: With input of shapes :math:`[(X_0, Y_0, Z_0, C), (X_1, Y_1, Z_1, C)]` the output padded tensor will be of shape :math:`(2, max(X_0, X_1, \text{max_shape}[0]), max(Y_0, Y_1, \text{max_shape}[1]), max(Z_0, Z_1, \text{max_shape}[2]), C)` The output shape_per_tensor with be the tensor: :math:`[[X_0, Y_0, Z_0], [X_1, Y_1, Z_1]].` Args: tensor_list (sequence of torch.Tensor): any python sequence of tensors of identical type, number of dimensions, and last dimension size, e.g. :math:`[(H_0, W_0, C), (H_1, W_1, C)]`. padding_value (float): the value that will be used as padding. max_shape (list, tuple or torch.LongTensor): list of maximum value for each dim of the output shape (except batch and last axis), if a value is set to None then it will be the maximum value among the tensors. Default: All maximum values among the tensors. Return: (torch.Tensor, torch.LongTensor): the :ref:`padded tensor<padded>` and the associated :ref:`shape_per_tensor<padded_shape_per_tensor>`. Example: >>> a = torch.LongTensor([[0, 1, 2], ... [1, 2, 3]]) >>> b = torch.LongTensor([[2, 4, 5]]) >>> padded_tensor, shape_per_tensor = list_to_padded([a, b], -1, [3]) >>> padded_tensor tensor([[[ 0, 1, 2], [ 1, 2, 3], [-1, -1, -1]], <BLANKLINE> [[ 2, 4, 5], [-1, -1, -1], [-1, -1, -1]]]) >>> shape_per_tensor tensor([[2], [1]])
Here is the function:
def list_to_padded(tensor_list, padding_value, max_shape=None):
    r"""Stack a sequence of torch.Tensor into a single :ref:`padded tensor<padded>`.

    Tensors of same type, number of dimensions and last dimension size are
    padded with ``padding_value`` and stacked on the first axis. E.g. inputs of
    shapes :math:`[(X_0, Y_0, Z_0, C), (X_1, Y_1, Z_1, C)]` produce a padded
    tensor of shape
    :math:`(2, max(X_0, X_1, \text{max_shape}[0]),
    max(Y_0, Y_1, \text{max_shape}[1]), max(Z_0, Z_1, \text{max_shape}[2]), C)`
    and a shape_per_tensor :math:`[[X_0, Y_0, Z_0], [X_1, Y_1, Z_1]]`.

    Args:
        tensor_list (sequence of torch.Tensor): any python sequence of tensors of
            identical type, number of dimensions, and last dimension size,
            e.g. :math:`[(H_0, W_0, C), (H_1, W_1, C)]`.
        padding_value (float): the value that will be used as padding.
        max_shape (list, tuple or torch.LongTensor): list of maximum value for each
            dim of the output shape (except batch and last axis), if a value is
            set to None then it will be the maximum value among the tensors.
            Default: All maximum values among the tensors.

    Return:
        (torch.Tensor, torch.LongTensor):
            the :ref:`padded tensor<padded>` and the associated
            :ref:`shape_per_tensor<padded_shape_per_tensor>`.
    """
    shape_per_tensor = get_shape_per_tensor(tensor_list)
    max_shape = fill_max_shape(shape_per_tensor, max_shape)
    batch_size = shape_per_tensor.shape[0]
    last_dim = tensor_list[0].shape[-1]
    # Allocate the fully padded output, then paste each tensor into its slot.
    padded = torch.full((batch_size, *max_shape, last_dim),
                        fill_value=padding_value,
                        device=tensor_list[0].device,
                        dtype=tensor_list[0].dtype)
    for batch_idx, tensor in enumerate(tensor_list):
        region = [batch_idx] + [slice(elem_dim) for elem_dim in tensor.shape]
        padded[region] = tensor
    return padded, shape_per_tensor
4,658 | import torch
from kaolin import _C
def padded_to_list(padded_tensor, shape_per_tensor):
    """Split a single :ref:`padded tensor<padded>` back into a list of torch.Tensor.

    Args:
        padded_tensor (torch.Tensor): a :ref:`padded tensor<padded>`.
        shape_per_tensor (torch.LongTensor): the
            :ref:`shape_per_tensor<padded_shape_per_tensor>` tensor associated
            to the padded tensor.

    Return:
        list of torch.Tensor: list of tensor unbatched from padded_tensor
    """
    tensors = []
    # For each batch item, slice away the padding according to its true shape.
    for batch_idx, shape in enumerate(shape_per_tensor):
        selector = [batch_idx] + [slice(dim) for dim in shape]
        tensors.append(padded_tensor[selector])
    return tensors
The provided code snippet includes necessary dependencies for implementing the `padded_to_packed` function. Write a Python function `def padded_to_packed(padded_tensor, shape_per_tensor)` to solve the following problem:
Converts a single padded tensor into a packed tensor. Args: padded_tensor (torch.Tensor): a :ref:`padded tensor<padded>`. shape_per_tensor (torch.LongTensor): the :ref:`shape_per_tensor<padded_shape_per_tensor>` tensor associated to the padded tensor. Returns: (torch.Tensor): the :ref:`packed tensor<packed>`.
Here is the function:
def padded_to_packed(padded_tensor, shape_per_tensor):
    """Convert a single :ref:`padded tensor<padded>` into a :ref:`packed tensor<packed>`.

    Args:
        padded_tensor (torch.Tensor): a :ref:`padded tensor<padded>`.
        shape_per_tensor (torch.LongTensor): the
            :ref:`shape_per_tensor<padded_shape_per_tensor>` tensor associated
            to the padded tensor.

    Returns:
        (torch.Tensor): the :ref:`packed tensor<packed>`.
    """
    last_dim = padded_tensor.shape[-1]
    # Strip the padding from each batch item, flatten, then concatenate.
    flattened = [subtensor.reshape(-1, last_dim)
                 for subtensor in padded_to_list(padded_tensor, shape_per_tensor)]
    return torch.cat(flattened, dim=0)
4,659 | from __future__ import division
import torch
The provided code snippet includes necessary dependencies for implementing the `spherical2cartesian` function. Write a Python function `def spherical2cartesian(azimuth, elevation, distance=None)` to solve the following problem:
Convert spherical coordinates to cartesian. Assuming X toward camera, Z-up and Y-right. Args: azimuth (torch.Tensor): azimuth in radians. elevation (torch.Tensor): elevation in radians. distance (torch.Tensor or float, optional): distance. Default: 1. Returns: (torch.Tensor, torch.Tensor, torch.Tensor): x, y, z, of same shape and dtype as the inputs.
Here is the function:
def spherical2cartesian(azimuth, elevation, distance=None):
    """Convert spherical coordinates to cartesian.

    Assuming X toward camera, Z-up and Y-right.

    Args:
        azimuth (torch.Tensor): azimuth in radians.
        elevation (torch.Tensor): elevation in radians.
        distance (torch.Tensor or float, optional): distance. Default: 1.

    Returns:
        (torch.Tensor, torch.Tensor, torch.Tensor):
            x, y, z, of same shape and dtype as the inputs.
    """
    z = torch.sin(elevation)
    xy_radius = torch.cos(elevation)
    if distance is not None:
        z = z * distance
        xy_radius = xy_radius * distance
    x = torch.cos(azimuth) * xy_radius
    y = torch.sin(azimuth) * xy_radius
    return x, y, z
4,660 | from __future__ import division
import torch
The provided code snippet includes necessary dependencies for implementing the `cartesian2spherical` function. Write a Python function `def cartesian2spherical(x, y, z)` to solve the following problem:
Convert cartesian coordinates to spherical coordinates in radians. Assuming X toward camera, Z-up and Y-right. Args: x (torch.Tensor): X components of the coordinates. y (torch.Tensor): Y components of the coordinates. z (torch.Tensor): Z components of the coordinates. Returns: (torch.Tensor, torch.Tensor, torch.Tensor): azimuth, elevation, distance, of same shape and dtype as the inputs.
Here is the function:
def cartesian2spherical(x, y, z):
    """Convert cartesian coordinates to spherical coordinates in radians.

    Assuming X toward camera, Z-up and Y-right.

    Args:
        x (torch.Tensor): X components of the coordinates.
        y (torch.Tensor): Y components of the coordinates.
        z (torch.Tensor): Z components of the coordinates.

    Returns:
        (torch.Tensor, torch.Tensor, torch.Tensor):
            azimuth, elevation, distance, of same shape and dtype as the inputs.
    """
    distance = torch.sqrt(x ** 2 + y ** 2 + z ** 2)
    return torch.atan2(y, x), torch.asin(z / distance), distance
4,661 | import numpy as np
import torch
from . import triangle_hash
from kaolin import _C
def _unbatched_check_sign_cuda(verts, faces, points):
    """CUDA backend for :func:`check_sign` on a single (unbatched) mesh.

    Args:
        verts (torch.Tensor): vertices of a single mesh, of shape
            (num_vertices, 3).
        faces (torch.LongTensor): faces of the mesh, of shape (num_faces, 3).
        points (torch.Tensor): points to check, of shape (num_points, 3).

    Returns:
        (torch.BoolTensor): True for each point considered inside the mesh,
            of shape (num_points,).
    """
    # Removed unused unpacking of points.size() from the original.
    points = points.contiguous()
    # Gather the three vertices of each face as contiguous (num_faces, 3)
    # buffers, as the CUDA op takes per-corner tensors.
    v1 = torch.index_select(verts, 0, faces[:, 0]).view(-1, 3).contiguous()
    v2 = torch.index_select(verts, 0, faces[:, 1]).view(-1, 3).contiguous()
    v3 = torch.index_select(verts, 0, faces[:, 2]).view(-1, 3).contiguous()
    # `ints` presumably holds per-point ray/triangle intersection counts
    # (see the _C op) -- an odd count is treated as "inside".
    ints = _C.ops.mesh.unbatched_mesh_intersection_cuda(points, v1, v2, v3)
    contains = ints % 2 == 1.
    return contains
class _UnbatchedMeshIntersector:
    r"""Class to determine if a point in space lies within or outside a mesh.

    CPU backend for :func:`check_sign`: for each query point, a vertical
    (Z-axis) ray test is performed against candidate triangles, and the
    parity of the intersection counts decides containment. Candidate
    triangle lookup in the XY plane is delegated to
    ``_TriangleIntersector2d`` (defined elsewhere in this module --
    presumably a 2D spatial acceleration structure; confirm its contract
    there).
    """
    def __init__(self, vertices, faces, resolution=512):
        # Per-face vertex coordinates as a float64 numpy array,
        # of shape (num_faces, 3, 3).
        triangles = vertices.data.cpu().numpy(
        )[faces.data.cpu().numpy()].astype(np.float64)
        n_tri = triangles.shape[0]
        self.resolution = resolution
        # Axis-aligned bounding box over all triangle vertices.
        self.bbox_min = triangles.reshape(3 * n_tri, 3).min(axis=0)
        self.bbox_max = triangles.reshape(3 * n_tri, 3).max(axis=0)
        # Translate and scale it to [0.5, self.resolution - 0.5]^3
        self.scale = (resolution - 1) / (self.bbox_max - self.bbox_min)
        self.translate = 0.5 - self.scale * self.bbox_min
        self._triangles = triangles = self.rescale(triangles)
        # Only the XY projection is needed for the 2D candidate queries.
        triangles2d = triangles[:, :, :2]
        self._tri_intersector2d = _TriangleIntersector2d(
            triangles2d, resolution)
    def query(self, points):
        """Return a bool numpy array marking which of `points` lie inside the mesh."""
        # Rescale points into the same normalized grid as the triangles.
        points = self.rescale(points)
        # placeholder result with no hits we'll fill in later
        contains = np.zeros(len(points), dtype=bool)
        # cull points outside of the axis aligned bounding box
        # this avoids running ray tests unless points are close
        inside_aabb = np.all(
            (0 <= points) & (points <= self.resolution), axis=1)
        if not inside_aabb.any():
            return contains
        # Only consider points inside bounding box
        mask = inside_aabb
        points = points[mask]
        # Compute intersection depth and check order
        points_indices, tri_indices = self._tri_intersector2d.query(
            points[:, :2])
        triangles_intersect = self._triangles[tri_indices]
        points_intersect = points[points_indices]
        depth_intersect, abs_n_2 = self.compute_intersection_depth(
            points_intersect, triangles_intersect)
        # Count number of intersections in both directions along the Z ray.
        # Depths are compared pre-multiplied by |n_z| to avoid a division
        # (see compute_intersection_depth).
        smaller_depth = depth_intersect >= points_intersect[:, 2] * abs_n_2
        bigger_depth = depth_intersect < points_intersect[:, 2] * abs_n_2
        points_indices_0 = points_indices[smaller_depth]
        points_indices_1 = points_indices[bigger_depth]
        nintersect0 = np.bincount(points_indices_0, minlength=points.shape[0])
        nintersect1 = np.bincount(points_indices_1, minlength=points.shape[0])
        # Check if point contained in mesh: a point counts as inside only if
        # the intersection count is odd in BOTH ray directions (presumably to
        # be robust against degenerate/grazing hits -- see disabled warning).
        contains1 = (np.mod(nintersect0, 2) == 1)
        contains2 = (np.mod(nintersect1, 2) == 1)
        # if (contains1 != contains2).any():
        #     print('Warning: contains1 != contains2 for some points.')
        contains[mask] = (contains1 & contains2)
        return contains
    def compute_intersection_depth(self, points, triangles):
        """Return the |n_z|-scaled depth at which the vertical ray through each
        point meets its candidate triangle's supporting plane, plus |n_z|.

        Depth is NaN where the triangle plane is vertical (n_z == 0).
        """
        t1 = triangles[:, 0, :]
        t2 = triangles[:, 1, :]
        t3 = triangles[:, 2, :]
        # Two edge vectors spanning the triangle plane.
        v1 = t3 - t1
        v2 = t2 - t1
        # v1 = v1 / np.linalg.norm(v1, axis=-1, keepdims=True)
        # v2 = v2 / np.linalg.norm(v2, axis=-1, keepdims=True)
        normals = np.cross(v1, v2)
        # Projection of the XY offset onto the (unnormalized) plane normal.
        alpha = np.sum(normals[:, :2] * (t1[:, :2] - points[:, :2]), axis=1)
        n_2 = normals[:, 2]
        t1_2 = t1[:, 2]
        s_n_2 = np.sign(n_2)
        abs_n_2 = np.abs(n_2)
        # Vertical triangles (n_z == 0) have no single intersection depth.
        mask = (abs_n_2 != 0)
        depth_intersect = np.full(points.shape[0], np.nan)
        depth_intersect[mask] = \
            t1_2[mask] * abs_n_2[mask] + alpha[mask] * s_n_2[mask]
        return depth_intersect, abs_n_2
    def rescale(self, array):
        """Map `array` coordinates into the normalized [0.5, resolution - 0.5] grid."""
        array = self.scale * array + self.translate
        return array
The provided code snippet includes necessary dependencies for implementing the `check_sign` function. Write a Python function `def check_sign(verts, faces, points, hash_resolution=512)` to solve the following problem:
r"""Checks if a set of points is contained inside a watertight triangle mesh. Shoots a ray from each point to be checked and calculates the number of intersections between the ray and triangles in the mesh. Uses the parity of the number of intersections to determine if the point is inside the mesh. Args: verts (torch.Tensor): Vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.Tensor): Faces, of shape :math:`(\text{num_faces}, 3)`. points (torch.Tensor): Points to check, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. hash_resolution (int): Resolution used to check the points sign. Only used with CPU. Default: 512. Returns: (torch.BoolTensor): Tensor indicating whether each point is inside the mesh, of shape :math:`(\text{batch_size}, \text{num_points})`. Example: >>> device = 'cuda' if torch.cuda.is_available() else 'cpu' >>> verts = torch.tensor([[[0., 0., 0.], ... [1., 0.5, 1.], ... [0.5, 1., 1.], ... [1., 1., 0.5]]], device = device) >>> faces = torch.tensor([[0, 3, 1], ... [0, 1, 2], ... [0, 2, 3], ... [3, 2, 1]], device = device) >>> axis = torch.linspace(0.1, 0.9, 3, device = device) >>> p_x, p_y, p_z = torch.meshgrid(axis + 0.01, axis + 0.02, axis + 0.03) >>> points = torch.cat((p_x.unsqueeze(-1), p_y.unsqueeze(-1), p_z.unsqueeze(-1)), dim=3) >>> points = points.view(1, -1, 3) >>> check_sign(verts, faces, points) tensor([[ True, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, True, False, False, False, False, False, True, False, True, False]], device='cuda:0')
Here is the function:
def check_sign(verts, faces, points, hash_resolution=512):
    r"""Checks if a set of points is contained inside a watertight triangle mesh.

    Shoots a ray from each point to be checked
    and calculates the number of intersections
    between the ray and triangles in the mesh.
    Uses the parity of the number of intersections
    to determine if the point is inside the mesh.

    Args:
        verts (torch.Tensor):
            Vertices, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.Tensor):
            Faces, of shape :math:`(\text{num_faces}, 3)`.
        points (torch.Tensor):
            Points to check, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`.
        hash_resolution (int):
            Resolution used to check the points sign. Only used with CPU.
            Default: 512.

    Returns:
        (torch.BoolTensor):
            Tensor indicating whether each point is inside the mesh,
            of shape :math:`(\text{batch_size}, \text{num_points})`.

    Example:
        >>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
        >>> verts = torch.tensor([[[0., 0., 0.],
        ...                        [1., 0.5, 1.],
        ...                        [0.5, 1., 1.],
        ...                        [1., 1., 0.5]]], device = device)
        >>> faces = torch.tensor([[0, 3, 1],
        ...                       [0, 1, 2],
        ...                       [0, 2, 3],
        ...                       [3, 2, 1]], device = device)
        >>> axis = torch.linspace(0.1, 0.9, 3, device = device)
        >>> p_x, p_y, p_z = torch.meshgrid(axis + 0.01, axis + 0.02, axis + 0.03)
        >>> points = torch.cat((p_x.unsqueeze(-1), p_y.unsqueeze(-1), p_z.unsqueeze(-1)), dim=3)
        >>> points = points.view(1, -1, 3)
        >>> check_sign(verts, faces, points)
        tensor([[ True, False, False, False, False, False, False, False, False, False,
                 False, False, False,  True, False, False, False,  True, False, False,
                 False, False, False,  True, False,  True, False]], device='cuda:0')
    """
    _validate_check_sign_inputs(verts, faces, points, hash_resolution)
    device = points.device
    # Normalize mesh and query points by the largest bounding-box extent of
    # each batch item, so both backends operate on a consistent scale.
    xlen = verts[..., 0].max(-1)[0] - verts[..., 0].min(-1)[0]
    ylen = verts[..., 1].max(-1)[0] - verts[..., 1].min(-1)[0]
    zlen = verts[..., 2].max(-1)[0] - verts[..., 2].min(-1)[0]
    maxlen = torch.max(torch.stack([xlen, ylen, zlen]), 0)[0]
    verts = verts / maxlen.view(-1, 1, 1)
    points = points / maxlen.view(-1, 1, 1)
    results = []
    if device.type == 'cuda':
        # Dedicated CUDA op, one batch item at a time.
        for i_batch in range(verts.shape[0]):
            results.append(
                _unbatched_check_sign_cuda(verts[i_batch], faces, points[i_batch]))
    else:
        # CPU fallback: numpy-based ray casting per batch item.
        for i_batch in range(verts.shape[0]):
            intersector = _UnbatchedMeshIntersector(verts[i_batch], faces,
                                                    hash_resolution)
            contains = intersector.query(points[i_batch].data.cpu().numpy())
            results.append(torch.tensor(contains).to(device))
    return torch.stack(results)


def _validate_check_sign_inputs(verts, faces, points, hash_resolution):
    """Validate devices, dtypes and shapes of :func:`check_sign` inputs."""
    assert verts.device == points.device
    assert faces.device == points.device
    if not faces.dtype == torch.int64:
        raise TypeError(f"Expected faces entries to be torch.int64 "
                        f"but got {faces.dtype}.")
    if not isinstance(hash_resolution, int):
        raise TypeError(f"Expected hash_resolution to be int "
                        f"but got {type(hash_resolution)}.")
    if verts.ndim != 3:
        raise ValueError(f"Expected verts to have 3 dimensions "
                         f"but got {verts.ndim} dimensions.")
    if faces.ndim != 2:
        raise ValueError(f"Expected faces to have 2 dimensions "
                         f"but got {faces.ndim} dimensions.")
    if points.ndim != 3:
        raise ValueError(f"Expected points to have 3 dimensions "
                         f"but got {points.ndim} dimensions.")
    if verts.shape[2] != 3:
        raise ValueError(f"Expected verts to have 3 coordinates "
                         f"but got {verts.shape[2]} coordinates.")
    if faces.shape[1] != 3:
        raise ValueError(f"Expected faces to have 3 vertices "
                         f"but got {faces.shape[1]} vertices.")
    if points.shape[2] != 3:
        raise ValueError(f"Expected points to have 3 coordinates "
                         f"but got {points.shape[2]} coordinates.")
4,662 | import torch
The provided code snippet includes necessary dependencies for implementing the `index_vertices_by_faces` function. Write a Python function `def index_vertices_by_faces(vertices_features, faces)` to solve the following problem:
r"""Index vertex features to convert per vertex tensor to per vertex per face tensor. Args: vertices_features (torch.FloatTensor): vertices features, of shape :math:`(\text{batch_size}, \text{num_points}, \text{knum})`, ``knum`` is feature dimension, the features could be xyz position, rgb color, or even neural network features. faces (torch.LongTensor): face index, of shape :math:`(\text{num_faces}, \text{num_vertices})`. Returns: (torch.FloatTensor): the face features, of shape :math:`(\text{batch_size}, \text{num_faces}, \text{num_vertices}, \text{knum})`.
Here is the function:
def index_vertices_by_faces(vertices_features, faces):
    r"""Convert per-vertex features into per-vertex-per-face features.

    Every vertex index in ``faces`` is used to look up the corresponding
    feature row, producing one feature vector per corner of each face.

    Args:
        vertices_features (torch.FloatTensor):
            per-vertex features of shape
            :math:`(\text{batch_size}, \text{num_points}, \text{knum})`;
            ``knum`` may be xyz positions, colors, or learned features.
        faces (torch.LongTensor):
            face indices of shape :math:`(\text{num_faces}, \text{num_vertices})`.

    Returns:
        (torch.FloatTensor):
            per-face features of shape
            :math:`(\text{batch_size}, \text{num_faces}, \text{num_vertices}, \text{knum})`.
    """
    assert vertices_features.ndim == 3, \
        "vertices_features must have 3 dimensions of shape (batch_size, num_points, knum)"
    assert faces.ndim == 2, "faces must have 2 dimensions of shape (num_faces, num_vertices)"
    batch = vertices_features.shape[0]
    feat_dim = vertices_features.shape[-1]
    face_size = faces.shape[-1]
    # Broadcast features along a per-corner axis, then gather along the
    # point axis using the face indices expanded to the same rank.
    expanded_feats = vertices_features[:, :, None, :].expand(-1, -1, face_size, -1)
    gather_idx = faces.unsqueeze(0).unsqueeze(-1).expand(batch, -1, -1, feat_dim)
    return torch.gather(expanded_feats, dim=1, index=gather_idx)
4,663 | import torch
def adjacency_matrix(num_vertices, faces, sparse=True):
    r"""Calculates an adjacency matrix of a mesh.
    Args:
        num_vertices (int): Number of vertices of the mesh.
        faces (torch.LongTensor):
            Faces of shape :math:`(\text{num_faces}, \text{face_size})` of the mesh.
        sparse (bool): Whether to return a sparse tensor or not. Default: True.
    Returns:
        (torch.FloatTensor or torch.sparse.FloatTensor): adjacency matrix
    Example:
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> adjacency_matrix(3, faces)
        tensor(indices=tensor([[0, 0, 1, 1, 2, 2],
                               [1, 2, 0, 2, 0, 1]]),
               values=tensor([1., 1., 1., 1., 1., 1.]),
               size=(3, 3), nnz=6, layout=torch.sparse_coo)
    """
    device = faces.device
    # Record every edge of every face in both directions so the resulting
    # matrix is symmetric.
    forward_i = torch.stack([faces, torch.roll(faces, 1, dims=-1)], dim=-1)
    backward_i = torch.stack([torch.roll(faces, 1, dims=-1), faces], dim=-1)
    indices = torch.cat([forward_i, backward_i], dim=1).reshape(-1, 2)
    # Deduplicate edges shared between adjacent faces.
    indices = indices.unique(dim=0)
    if sparse:
        indices = indices.t()
        # If vertex i and j have an edge connect to it, A[i, j] = 1
        values = torch.ones(indices.shape[1], device=device)
        # torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is
        # the supported factory and produces the same COO tensor.
        adjacency = torch.sparse_coo_tensor(indices, values, (num_vertices, num_vertices))
    else:
        adjacency = torch.zeros((num_vertices, num_vertices), device=device, dtype=torch.float)
        adjacency[indices[:, 0], indices[:, 1]] = 1
    return adjacency
The provided code snippet includes necessary dependencies for implementing the `uniform_laplacian` function. Write a Python function `def uniform_laplacian(num_vertices, faces)` to solve the following problem:
r"""Calculates the uniform laplacian of a mesh. :math:`L[i, j] = \frac{1}{num\_neighbours(i)}` if i, j are neighbours. :math:`L[i, j] = -1` if i == j. :math:`L[i, j] = 0` otherwise. Args: num_vertices (int): Number of vertices for the mesh. faces (torch.LongTensor): Faces of shape :math:`(\text{num_faces}, \text{face_size})` of the mesh. Returns: (torch.Tensor): Uniform laplacian of the mesh of size :math:`(\text{num_vertices}, \text{num_vertices})` Example: >>> faces = torch.tensor([[0, 1, 2]]) >>> uniform_laplacian(3, faces) tensor([[-1.0000, 0.5000, 0.5000], [ 0.5000, -1.0000, 0.5000], [ 0.5000, 0.5000, -1.0000]])
Here is the function:
def uniform_laplacian(num_vertices, faces):
    r"""Calculates the uniform laplacian of a mesh.
    :math:`L[i, j] = \frac{1}{num\_neighbours(i)}` if i, j are neighbours.
    :math:`L[i, j] = -1` if i == j.
    :math:`L[i, j] = 0` otherwise.
    Args:
        num_vertices (int): Number of vertices for the mesh.
        faces (torch.LongTensor):
            Faces of shape :math:`(\text{num_faces}, \text{face_size})` of the mesh.
    Returns:
        (torch.Tensor):
            Uniform laplacian of the mesh of size :math:`(\text{num_vertices}, \text{num_vertices})`
    Example:
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> uniform_laplacian(3, faces)
        tensor([[-1.0000, 0.5000, 0.5000],
                [ 0.5000, -1.0000, 0.5000],
                [ 0.5000, 0.5000, -1.0000]])
    """
    # NOTE: removed unused local `batch_size = faces.shape[0]` — faces.shape[0]
    # is the number of faces, not a batch size, and the value was never used.
    dense_adjacency = adjacency_matrix(num_vertices, faces).to_dense()
    # Compute the number of neighbours of each vertex
    num_neighbour = torch.sum(dense_adjacency, dim=1).view(-1, 1)
    L = torch.div(dense_adjacency, num_neighbour)
    torch.diagonal(L)[:] = -1
    # Vertices with no neighbours produce 0/0 = NaN rows; zero them out.
    L[torch.isnan(L)] = 0
    return L
4,664 | import torch
The provided code snippet includes necessary dependencies for implementing the `compute_vertex_normals` function. Write a Python function `def compute_vertex_normals(faces, face_normals, num_vertices=None)` to solve the following problem:
r"""Computes normals for every vertex by averaging face normals assigned to that vertex for every face that has this vertex. Args: faces (torch.LongTensor): vertex indices of faces of a fixed-topology mesh batch with shape :math:`(\text{num_faces}, \text{face_size})`. face_normals (torch.FloatTensor): pre-normalized xyz normal values for every vertex of every face with shape :math:`(\text{batch_size}, \text{num_faces}, \text{face_size}, 3)`. num_vertices (int, optional): number of vertices V (set to max index in faces, if not set) Return: (torch.FloatTensor): of shape (B, V, 3)
Here is the function:
def compute_vertex_normals(faces, face_normals, num_vertices=None):
    r"""Computes normals for every vertex by averaging face normals
    assigned to that vertex for every face that has this vertex.
    Args:
        faces (torch.LongTensor): vertex indices of faces of a fixed-topology mesh batch with
            shape :math:`(\text{num_faces}, \text{face_size})`.
        face_normals (torch.FloatTensor): pre-normalized xyz normal values
            for every vertex of every face with shape
            :math:`(\text{batch_size}, \text{num_faces}, \text{face_size}, 3)`.
        num_vertices (int, optional): number of vertices V (set to max index in faces, if not set)
    Return:
        (torch.FloatTensor): of shape (B, V, 3)
    """
    if num_vertices is None:
        # Infer vertex count from the largest referenced index.
        num_vertices = int(faces.max()) + 1
    B = face_normals.shape[0]
    V = num_vertices
    F = faces.shape[0]
    FSz = faces.shape[1]
    # Accumulators: summed normals per vertex, and how many face corners
    # contributed to each vertex (used for the average below).
    vertex_normals = torch.zeros((B, V, 3), dtype=face_normals.dtype, device=face_normals.device)
    counts = torch.zeros((B, V), dtype=face_normals.dtype, device=face_normals.device)
    # Tile faces per batch element so scatter indices have a batch dim.
    faces = faces.unsqueeze(0).repeat(B, 1, 1)
    # Each face corner contributes a count of 1 to its vertex.
    fake_counts = torch.ones((B, F), dtype=face_normals.dtype, device=face_normals.device)
    #              B x F             B x F x 3
    # self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
    # self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
    # Accumulate one face-corner slot at a time: scatter_add_ sums the
    # corner's normal into its vertex row and bumps that vertex's count.
    for i in range(FSz):
        vertex_normals.scatter_add_(1, faces[..., i:i + 1].repeat(1, 1, 3), face_normals[..., i, :])
        counts.scatter_add_(1, faces[..., i], fake_counts)
    # clip(min=1) avoids division by zero for vertices referenced by no face
    # (their accumulated normal is zero anyway).
    counts = counts.clip(min=1).unsqueeze(-1)
    # Plain mean of contributing face normals; the result is NOT re-normalized
    # to unit length here.
    vertex_normals = vertex_normals / counts
    return vertex_normals
4,665 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
def _base_face_areas(face_vertices_0, face_vertices_1, face_vertices_2):
"""Base function to compute the face areas."""
x1, x2, x3 = torch.split(face_vertices_0 - face_vertices_1, 1, dim=-1)
y1, y2, y3 = torch.split(face_vertices_1 - face_vertices_2, 1, dim=-1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
areas = torch.sqrt(a + b + c) * 0.5
return areas
The provided code snippet includes necessary dependencies for implementing the `face_areas` function. Write a Python function `def face_areas(vertices, faces)` to solve the following problem:
Compute the areas of each face of triangle meshes. Args: vertices (torch.Tensor): The vertices of the meshes, of shape :math:`(\\text{batch_size}, \\text{num_vertices}, 3)`. faces (torch.LongTensor): the faces of the meshes, of shape :math:`(\\text{num_faces}, 3)`. Returns: (torch.Tensor): the face areas of same type as vertices and of shape :math:`(\\text{batch_size}, \\text{num_faces})`.
Here is the function:
def face_areas(vertices, faces):
    """Compute the area of every face of a batch of triangle meshes.

    Args:
        vertices (torch.Tensor):
            mesh vertices, of shape (batch_size, num_vertices, 3).
        faces (torch.LongTensor):
            mesh faces, of shape (num_faces, 3).

    Returns:
        (torch.Tensor):
            per-face areas, same dtype as ``vertices``,
            of shape (batch_size, num_faces).
    """
    if faces.shape[-1] != 3:
        raise NotImplementedError("face_areas is only implemented for triangle meshes")
    # Gather the three corner coordinates of every face.
    corner_vertices = [
        torch.index_select(vertices, 1, idx.reshape(-1))
        for idx in torch.split(faces, 1, dim=1)
    ]
    return _base_face_areas(*corner_vertices).squeeze(-1)
4,666 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
def _base_face_areas(face_vertices_0, face_vertices_1, face_vertices_2):
"""Base function to compute the face areas."""
x1, x2, x3 = torch.split(face_vertices_0 - face_vertices_1, 1, dim=-1)
y1, y2, y3 = torch.split(face_vertices_1 - face_vertices_2, 1, dim=-1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
areas = torch.sqrt(a + b + c) * 0.5
return areas
def tile_to_packed(values, numel_per_tensor):
    r"""Tile one value per tensor into a packed representation.

    Each ``values[i]`` is repeated ``numel_per_tensor[i]`` times, and the
    repeated runs are concatenated into a single column tensor.

    Args:
        values (torch.Tensor): values to tile, of shape :math:`(\text{batch_size},)`.
        numel_per_tensor (torch.LongTensor): number of elements per tensor
            of the output packed tensor.

    Return:
        torch.Tensor:
            The packed tensor of tiled values, of shape
            :math:`(sum(\text{numel_per_tensor}), 1)`.

    Example:
        >>> values = torch.tensor([0., 6., 7.])
        >>> numel_per_tensor = torch.LongTensor([2, 2, 3])
        >>> tile_to_packed(values, numel_per_tensor)
        tensor([[0.],
                [0.],
                [6.],
                [6.],
                [7.],
                [7.],
                [7.]])
    """
    if torch.cuda.is_available() and values.is_cuda and not numel_per_tensor.is_cuda:
        # Custom CUDA kernel path (kept as a fast path; lazy init is a TODO).
        total_numel = torch.sum(numel_per_tensor)
        tiled_packed_tensor = _TileToPackedCuda.apply(values, numel_per_tensor, total_numel)
    else:
        # repeat_interleave does the per-value tiling in a single C call.
        counts = numel_per_tensor.to(values.device)
        tiled_packed_tensor = values.repeat_interleave(counts).unsqueeze(-1)
    return tiled_packed_tensor
The provided code snippet includes necessary dependencies for implementing the `packed_face_areas` function. Write a Python function `def packed_face_areas(vertices, first_idx_vertices, faces, num_faces_per_mesh)` to solve the following problem:
Compute the areas of each face of triangle meshes. Args: vertices (torch.Tensor): The packed vertices of the meshes, of shape :math:`(\\text{num_vertices}, 3)`. first_idx_vertices (torch.Tensor): The :ref:`first_idx<packed_first_idx>` associated to vertices, of shape :math:`(\\text{batch_size})`. faces (torch.LongTensor): The packed faces of the meshes, of shape :math:`(\\text{num_faces}, 3)`. num_faces_per_mesh: The number of faces per mesh, of shape :math:`(\\text{batch_size})`. Returns: (torch.Tensor): The face areas of same type as vertices and of shape :math:`(\\text{num_faces})`.
Here is the function:
def packed_face_areas(vertices, first_idx_vertices, faces, num_faces_per_mesh):
    """Compute the area of every face of packed triangle meshes.

    Args:
        vertices (torch.Tensor):
            packed mesh vertices, of shape (num_vertices, 3).
        first_idx_vertices (torch.Tensor):
            first_idx associated to ``vertices``, of shape (batch_size).
        faces (torch.LongTensor):
            packed mesh faces, of shape (num_faces, 3).
        num_faces_per_mesh:
            number of faces per mesh, of shape (batch_size).

    Returns:
        (torch.Tensor):
            per-face areas, same dtype as ``vertices``, of shape (num_faces).
    """
    if faces.shape[-1] != 3:
        raise NotImplementedError("packed_face_areas is only implemented for triangle meshes")
    # Offset each mesh's face indices by that mesh's first vertex index so
    # they address the packed vertex tensor directly.
    vertex_offsets = tile_to_packed(first_idx_vertices[:-1].to(vertices.device),
                                    num_faces_per_mesh)
    shifted_faces = vertex_offsets + faces
    corners = [
        torch.index_select(vertices, 0, col.reshape(-1))
        for col in torch.split(shifted_faces, 1, dim=1)
    ]
    return _base_face_areas(*corners).view(-1)
4,667 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
def _base_face_areas(face_vertices_0, face_vertices_1, face_vertices_2):
"""Base function to compute the face areas."""
x1, x2, x3 = torch.split(face_vertices_0 - face_vertices_1, 1, dim=-1)
y1, y2, y3 = torch.split(face_vertices_1 - face_vertices_2, 1, dim=-1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
areas = torch.sqrt(a + b + c) * 0.5
return areas
def _base_sample_points_selected_faces(face_vertices, face_features=None):
"""Base function to sample points over selected faces.
The coordinates of the face vertices are interpolated to generate new samples.
Args:
face_vertices (tuple of torch.Tensor):
Coordinates of vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
face_features (tuple of torch.Tensor, Optional):
Features of face vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
Returns:
(torch.Tensor, torch.Tensor):
Sampled point coordinates of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
Sampled points interpolated features of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
If `face_vertices_features` arg is not specified, the returned interpolated features are None.
"""
face_vertices0, face_vertices1, face_vertices2 = face_vertices
sampling_shape = tuple(int(d) for d in face_vertices0.shape[:-1]) + (1,)
# u is proximity to middle point between v1 and v2 against v0.
# v is proximity to v2 against v1.
#
# The probability density for u should be f_U(u) = 2u.
# However, torch.rand use a uniform (f_X(x) = x) distribution,
# so using torch.sqrt we make a change of variable to have the desired density
# f_Y(y) = f_X(y ^ 2) * |d(y ^ 2) / dy| = 2y
u = torch.sqrt(torch.rand(sampling_shape,
device=face_vertices0.device,
dtype=face_vertices0.dtype))
v = torch.rand(sampling_shape,
device=face_vertices0.device,
dtype=face_vertices0.dtype)
w0 = 1 - u
w1 = u * (1 - v)
w2 = u * v
points = w0 * face_vertices0 + w1 * face_vertices1 + w2 * face_vertices2
features = None
if face_features is not None:
face_features0, face_features1, face_features2 = face_features
features = w0 * face_features0 + w1 * face_features1 + \
w2 * face_features2
return points, features
The provided code snippet includes necessary dependencies for implementing the `sample_points` function. Write a Python function `def sample_points(vertices, faces, num_samples, areas=None, face_features=None)` to solve the following problem:
r"""Uniformly sample points over the surface of triangle meshes. First face on which the point is sampled is randomly selected, with the probability of selection being proportional to the area of the face. then the coordinate on the face is uniformly sampled. If ``face_features`` is defined for the mesh faces, the sampled points will be returned with interpolated features as well, otherwise, no feature interpolation will occur. Args: vertices (torch.Tensor): The vertices of the meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.LongTensor): The faces of the mesh, of shape :math:`(\text{num_faces}, 3)`. num_samples (int): The number of point sampled per mesh. areas (torch.Tensor, optional): The areas of each face, of shape :math:`(\text{batch_size}, \text{num_faces})`, can be preprocessed, for fast on-the-fly sampling, will be computed if None (default). face_features (torch.Tensor, optional): Per-vertex-per-face features, matching ``faces`` order, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`. For example: 1. Texture uv coordinates would be of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`. 2. RGB color values would be of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`. When specified, it is used to interpolate the features for new sampled points. See also: :func:`~kaolin.ops.mesh.index_vertices_by_faces` for conversion of features defined per vertex and need to be converted to per-vertex-per-face shape of :math:`(\text{num_faces}, 3)`. Returns: (torch.Tensor, torch.LongTensor, (optional) torch.Tensor): the pointclouds of shape :math:`(\text{batch_size}, \text{num_samples}, 3)`, and the indexes of the faces selected, of shape :math:`(\text{batch_size}, \text{num_samples})`. If ``face_features`` arg is specified, then the interpolated features of sampled points of shape :math:`(\text{batch_size}, \text{num_samples}, \text{feature_dim})` are also returned.
Here is the function:
def sample_points(vertices, faces, num_samples, areas=None, face_features=None):
    r"""Uniformly sample points over the surface of triangle meshes.
    First face on which the point is sampled is randomly selected,
    with the probability of selection being proportional to the area of the face.
    then the coordinate on the face is uniformly sampled.
    If ``face_features`` is defined for the mesh faces,
    the sampled points will be returned with interpolated features as well,
    otherwise, no feature interpolation will occur.
    Args:
        vertices (torch.Tensor):
            The vertices of the meshes, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.LongTensor):
            The faces of the mesh, of shape :math:`(\text{num_faces}, 3)`.
        num_samples (int):
            The number of point sampled per mesh.
        areas (torch.Tensor, optional):
            The areas of each face, of shape :math:`(\text{batch_size}, \text{num_faces})`,
            can be preprocessed, for fast on-the-fly sampling,
            will be computed if None (default).
        face_features (torch.Tensor, optional):
            Per-vertex-per-face features, matching ``faces`` order,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`.
            For example:
            1. Texture uv coordinates would be of shape
            :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`.
            2. RGB color values would be of shape
            :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`.
            When specified, it is used to interpolate the features for new sampled points.
    See also:
        :func:`~kaolin.ops.mesh.index_vertices_by_faces` for conversion of features defined per vertex
        and need to be converted to per-vertex-per-face shape of :math:`(\text{num_faces}, 3)`.
    Returns:
        (torch.Tensor, torch.LongTensor, (optional) torch.Tensor):
            the pointclouds of shape :math:`(\text{batch_size}, \text{num_samples}, 3)`,
            and the indexes of the faces selected,
            of shape :math:`(\text{batch_size}, \text{num_samples})`.
            If ``face_features`` arg is specified, then the interpolated features of sampled points of shape
            :math:`(\text{batch_size}, \text{num_samples}, \text{feature_dim})` are also returned.
    """
    if faces.shape[-1] != 3:
        raise NotImplementedError("sample_points is only implemented for triangle meshes")
    faces_0, faces_1, faces_2 = torch.split(faces, 1, dim=1)        # (num_faces, 3) -> tuple of (num_faces,)
    face_v_0 = torch.index_select(vertices, 1, faces_0.reshape(-1))  # (batch_size, num_faces, 3)
    face_v_1 = torch.index_select(vertices, 1, faces_1.reshape(-1))  # (batch_size, num_faces, 3)
    face_v_2 = torch.index_select(vertices, 1, faces_2.reshape(-1))  # (batch_size, num_faces, 3)
    if areas is None:
        areas = _base_face_areas(face_v_0, face_v_1, face_v_2).squeeze(-1)
    # Categorical normalizes the (non-negative) areas into probabilities,
    # so a face is selected with probability proportional to its area.
    face_dist = torch.distributions.Categorical(areas)
    face_choices = face_dist.sample([num_samples]).transpose(0, 1)
    _face_choices = face_choices.unsqueeze(-1).repeat(1, 1, 3)
    # Gather the corner coordinates of each selected face.
    v0 = torch.gather(face_v_0, 1, _face_choices)    # (batch_size, num_samples, 3)
    v1 = torch.gather(face_v_1, 1, _face_choices)    # (batch_size, num_samples, 3)
    v2 = torch.gather(face_v_2, 1, _face_choices)    # (batch_size, num_samples, 3)
    face_vertices_choices = (v0, v1, v2)
    # UV coordinates are available, make sure to calculate them for sampled points as well
    face_features_choices = None
    if face_features is not None:
        feat_dim = face_features.shape[-1]
        # (num_faces, 3) -> tuple of (num_faces,)
        _face_choices = face_choices[..., None, None].repeat(1, 1, 3, feat_dim)
        face_features_choices = torch.gather(face_features, 1, _face_choices)
        # Split into one tensor per face corner, matching face_vertices_choices.
        face_features_choices = tuple(
            tmp_feat.squeeze(2) for tmp_feat in torch.split(face_features_choices, 1, dim=2))
    points, point_features = _base_sample_points_selected_faces(
        face_vertices_choices, face_features_choices)
    if point_features is not None:
        return points, face_choices, point_features
    else:
        return points, face_choices
If ``face_features`` arg is specified, then the interpolated features of sampled points of shape :math:`(\text{batch_size}, \text{num_samples}, \text{feature_dim})` are also returned. |
4,668 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
def _base_face_areas(face_vertices_0, face_vertices_1, face_vertices_2):
"""Base function to compute the face areas."""
x1, x2, x3 = torch.split(face_vertices_0 - face_vertices_1, 1, dim=-1)
y1, y2, y3 = torch.split(face_vertices_1 - face_vertices_2, 1, dim=-1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
areas = torch.sqrt(a + b + c) * 0.5
return areas
def _base_sample_points_selected_faces(face_vertices, face_features=None):
"""Base function to sample points over selected faces.
The coordinates of the face vertices are interpolated to generate new samples.
Args:
face_vertices (tuple of torch.Tensor):
Coordinates of vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
face_features (tuple of torch.Tensor, Optional):
Features of face vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
Returns:
(torch.Tensor, torch.Tensor):
Sampled point coordinates of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
Sampled points interpolated features of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
If `face_vertices_features` arg is not specified, the returned interpolated features are None.
"""
face_vertices0, face_vertices1, face_vertices2 = face_vertices
sampling_shape = tuple(int(d) for d in face_vertices0.shape[:-1]) + (1,)
# u is proximity to middle point between v1 and v2 against v0.
# v is proximity to v2 against v1.
#
# The probability density for u should be f_U(u) = 2u.
# However, torch.rand use a uniform (f_X(x) = x) distribution,
# so using torch.sqrt we make a change of variable to have the desired density
# f_Y(y) = f_X(y ^ 2) * |d(y ^ 2) / dy| = 2y
u = torch.sqrt(torch.rand(sampling_shape,
device=face_vertices0.device,
dtype=face_vertices0.dtype))
v = torch.rand(sampling_shape,
device=face_vertices0.device,
dtype=face_vertices0.dtype)
w0 = 1 - u
w1 = u * (1 - v)
w2 = u * v
points = w0 * face_vertices0 + w1 * face_vertices1 + w2 * face_vertices2
features = None
if face_features is not None:
face_features0, face_features1, face_features2 = face_features
features = w0 * face_features0 + w1 * face_features1 + \
w2 * face_features2
return points, features
def get_first_idx(numel_per_tensor):
    """Return the first index of each tensor in the packed tensor.

    The result has one extra trailing entry holding the total element
    count (i.e. last index + 1).

    Args:
        numel_per_tensor (torch.LongTensor): number of elements
            (vertices, faces, points...) in each unbatched tensor, as a 1D tensor.

    Returns:
        (torch.LongTensor):
            first indices for each unbatched tensor, plus the total count,
            as a 1D tensor.

    Example:
        >>> numel_per_tensor = torch.LongTensor([2, 3, 5])
        >>> get_first_idx(numel_per_tensor)
        tensor([ 0,  2,  5, 10])
    """
    # Exclusive prefix sum: a leading zero followed by the cumulative sums.
    leading_zero = torch.zeros((1,), dtype=torch.long,
                               device=numel_per_tensor.device)
    return torch.cat([leading_zero, torch.cumsum(numel_per_tensor, dim=0)])
def tile_to_packed(values, numel_per_tensor):
    r"""Tile one value per tensor into a packed representation.

    Each ``values[i]`` is repeated ``numel_per_tensor[i]`` times, and the
    repeated runs are concatenated into a single column tensor.

    Args:
        values (torch.Tensor): values to tile, of shape :math:`(\text{batch_size},)`.
        numel_per_tensor (torch.LongTensor): number of elements per tensor
            of the output packed tensor.

    Return:
        torch.Tensor:
            The packed tensor of tiled values, of shape
            :math:`(sum(\text{numel_per_tensor}), 1)`.

    Example:
        >>> values = torch.tensor([0., 6., 7.])
        >>> numel_per_tensor = torch.LongTensor([2, 2, 3])
        >>> tile_to_packed(values, numel_per_tensor)
        tensor([[0.],
                [0.],
                [6.],
                [6.],
                [7.],
                [7.],
                [7.]])
    """
    if torch.cuda.is_available() and values.is_cuda and not numel_per_tensor.is_cuda:
        # Custom CUDA kernel path (kept as a fast path; lazy init is a TODO).
        total_numel = torch.sum(numel_per_tensor)
        tiled_packed_tensor = _TileToPackedCuda.apply(values, numel_per_tensor, total_numel)
    else:
        # repeat_interleave does the per-value tiling in a single C call.
        counts = numel_per_tensor.to(values.device)
        tiled_packed_tensor = values.repeat_interleave(counts).unsqueeze(-1)
    return tiled_packed_tensor
def packed_to_padded(packed_tensor, shape_per_tensor, first_idx, padding_value, max_shape=None):
    """Converts a single packed tensor into a padded tensor.

    Args:
        packed_tensor (torch.Tensor): a packed tensor.
        shape_per_tensor (torch.LongTensor): the shape_per_tensor
            tensor associated to the padded tensor.
        first_idx (torch.LongTensor): first_idx associated to the packed tensor.
        padding_value (float): the value that will be used as padding.
        max_shape (list, tuple or torch.LongTensor): list of maximum value for each dim
            of the output shape (except batch and last axis), if a value is set to None
            then it will be the maximum value among the tensors.
            Default: All maximum values among the tensors.

    Returns:
        (torch.Tensor): the padded tensor.
    """
    batch_size = shape_per_tensor.shape[0]
    feat_dim = packed_tensor.shape[1]
    max_shape = fill_max_shape(shape_per_tensor, max_shape)
    # Start from a buffer entirely filled with the padding value, then copy
    # each unbatched tensor into its top-left corner.
    padded = torch.full((batch_size, *max_shape, feat_dim), fill_value=padding_value,
                        device=packed_tensor.device, dtype=packed_tensor.dtype)
    for i, shape in enumerate(shape_per_tensor):
        chunk = packed_tensor[first_idx[i]:first_idx[i + 1]]
        region = (i,) + tuple(slice(int(dim_size)) for dim_size in shape)
        padded[region] = chunk.reshape(*shape, feat_dim)
    return padded
The provided code snippet includes necessary dependencies for implementing the `packed_sample_points` function. Write a Python function `def packed_sample_points(vertices, first_idx_vertices, faces, num_faces_per_mesh, num_samples, areas=None)` to solve the following problem:
r"""Uniformly sample points over the surface of triangle meshes. First face on which the point is sampled is randomly selected, with the probability of selection being proportional to the area of the face. then the coordinate on the face is uniformly sampled. The return pointclouds are with fixed batching. Args: vertices (torch.Tensor): The packed vertices of the meshes, of shape :math:`(\text{num_vertices}, 3)`. first_idx_vertices (torch.Tensor): The :ref:`first_idx<packed_first_idx>` associated to vertices, of shape :math:`(\text{batch_size})`. faces (torch.LongTensor): The packed faces of the meshes, of shape :math:`(\text{num_faces}, 3)`. num_faces_per_mesh: The number of faces per mesh, of shape :math:`(\text{batch_size})`. num_samples (int): The number of point sampled per mesh. areas (torch.Tensor, optional): The areas of each face, of shape :math:`(\text{num_faces})`, can be preprocessed, for fast on-the-fly sampling, will be computed if None (default). Returns: (torch.Tensor, torch.LongTensor): - The pointclouds, of shape :math:`(\text{batch_size}, \text{num_points}, 3)``. - The indexes of the faces selected (as merged faces), of shape :math:`(\text{batch_size}, \text{num_points}).`
Here is the function:
def packed_sample_points(vertices, first_idx_vertices,
                         faces, num_faces_per_mesh, num_samples, areas=None):
    r"""Uniformly sample points over the surface of triangle meshes.

    The face on which each point is sampled is randomly selected,
    with the probability of selection being proportional to the area of the face,
    then the coordinate on the face is uniformly sampled.

    The returned pointclouds are with fixed batching.

    Args:
        vertices (torch.Tensor):
            The packed vertices of the meshes, of shape :math:`(\text{num_vertices}, 3)`.
        first_idx_vertices (torch.Tensor):
            The :ref:`first_idx<packed_first_idx>` associated to vertices,
            of shape :math:`(\text{batch_size})`.
        faces (torch.LongTensor):
            The packed faces of the meshes, of shape :math:`(\text{num_faces}, 3)`.
        num_faces_per_mesh:
            The number of faces per mesh, of shape :math:`(\text{batch_size})`.
        num_samples (int):
            The number of point sampled per mesh.
        areas (torch.Tensor, optional):
            The areas of each face, of shape :math:`(\text{num_faces})`,
            can be preprocessed, for fast on-the-fly sampling,
            will be computed if None (default).

    Returns:
        (torch.Tensor, torch.LongTensor):
            - The pointclouds, of shape
              :math:`(\text{batch_size}, \text{num_points}, 3)`.
            - The indexes of the faces selected (as merged faces), of shape
              :math:`(\text{batch_size}, \text{num_points})`.
    """
    if faces.shape[-1] != 3:
        raise NotImplementedError("packed_sample_points is only implemented for triangle meshes")
    batch_size = num_faces_per_mesh.shape[0]
    # Shift each mesh's face indices by that mesh's first vertex index, so that
    # faces index directly into the packed ``vertices`` tensor.
    merged_faces = tile_to_packed(first_idx_vertices[:-1].to(vertices.device),
                                  num_faces_per_mesh) + faces
    faces_0, faces_1, faces_2 = torch.split(merged_faces, 1, dim=1)
    # Gather the three corner positions of every face, still in packed layout.
    face_v_0 = torch.index_select(vertices, 0, faces_0.reshape(-1))
    face_v_1 = torch.index_select(vertices, 0, faces_1.reshape(-1))
    face_v_2 = torch.index_select(vertices, 0, faces_2.reshape(-1))
    if areas is None:
        areas = _base_face_areas(face_v_0, face_v_1, face_v_2).squeeze(-1)
    # TODO(cfujitsang): this is kind of cheating, we should try to avoid padding on packed ops
    # But is works well since setting 0. padding leads to 0. probability to be picked,
    first_idx_faces = get_first_idx(num_faces_per_mesh)
    # Pad per-face areas to a (batch_size, max_num_faces) tensor; the 0. padding
    # gets zero probability under the categorical distribution below.
    areas = packed_to_padded(areas.reshape(-1, 1),
                             num_faces_per_mesh.reshape(-1, 1),
                             first_idx_faces,
                             0.).squeeze(-1)
    # Draw ``num_samples`` face indices per mesh, proportionally to face area.
    face_dist = torch.distributions.Categorical(areas)
    face_choices = face_dist.sample([num_samples]).transpose(0, 1)
    # since face_v_X are still packed, we need to merged meshes indexes
    merged_face_choices = \
        (face_choices + first_idx_faces[:-1].reshape(-1, 1).to(face_choices.device)).reshape(-1)
    v0 = torch.index_select(face_v_0, 0, merged_face_choices).reshape(batch_size, num_samples, 3)
    v1 = torch.index_select(face_v_1, 0, merged_face_choices).reshape(batch_size, num_samples, 3)
    v2 = torch.index_select(face_v_2, 0, merged_face_choices).reshape(batch_size, num_samples, 3)
    # Uniformly sample a coordinate on each of the chosen faces.
    points, _ = _base_sample_points_selected_faces((v0, v1, v2))
    return points, merged_face_choices.reshape(batch_size, num_samples)
4,669 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
The provided code snippet includes necessary dependencies for implementing the `face_normals` function. Write a Python function `def face_normals(face_vertices, unit=False)` to solve the following problem:
r"""Calculate normals of triangle meshes. Left-hand rule convention is used for picking normal direction. Args: face_vertices (torch.Tensor): of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`. unit (bool): if true, return normals as unit vectors. Default: False. Returns: (torch.FloatTensor): face normals, of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`
Here is the function:
def face_normals(face_vertices, unit=False):
    r"""Compute per-face normals of triangle meshes.

    The left-hand rule convention is used for picking the normal direction.

    Args:
        face_vertices (torch.Tensor):
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`.
        unit (bool):
            if true, return normals as unit vectors. Default: False.

    Returns:
        (torch.FloatTensor):
            face normals, of shape :math:`(\text{batch_size}, \text{num_faces}, 3)`
    """
    if face_vertices.shape[-2] != 3:
        raise NotImplementedError("face_normals is only implemented for triangle meshes")
    # Two triangle edges, both rooted at the first vertex of each face.
    first_corner = face_vertices[:, :, 0]
    edge_a = face_vertices[:, :, 1] - first_corner
    edge_b = face_vertices[:, :, 2] - first_corner
    normals = torch.cross(edge_a, edge_b, dim=2)
    if unit:
        # Small epsilon guards against division by zero on degenerate faces.
        lengths = normals.norm(dim=2, keepdim=True)
        normals = normals / (lengths + 1e-10)
    return normals
4,670 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
def _get_adj_verts(edges_ex2, v):
"""Get sparse adjacency matrix for vertices given edges"""
adj_sparse_idx = torch.cat([edges_ex2, torch.flip(edges_ex2, [1])])
adj_sparse_idx = torch.unique(adj_sparse_idx, dim=0)
values = torch.ones(
adj_sparse_idx.shape[0], device=edges_ex2.device).float()
adj_sparse = torch.sparse.FloatTensor(
adj_sparse_idx.t(), values, torch.Size([v, v]))
return adj_sparse
def _get_alpha(n):
"""Compute weight alpha based on number of neighboring vertices following Loop Subdivision"""
n = n.float()
alpha = (5.0 / 8 - (3.0 / 8 + 1.0 / 4 * torch.cos(2 * math.pi / n)) ** 2) / n
alpha[n == 3] = 3 / 16
return alpha
The provided code snippet includes necessary dependencies for implementing the `subdivide_trianglemesh` function. Write a Python function `def subdivide_trianglemesh(vertices, faces, iterations, alpha=None)` to solve the following problem:
r"""Subdivide triangular meshes following the scheme of Loop subdivision proposed in `Smooth Subdivision Surfaces Based on Triangles`_. If the smoothing factor alpha is not given, this function performs exactly as Loop subdivision. Elsewise the vertex position is updated using the given per-vertex alpha value, which is differentiable and the alpha carries over to subsequent subdivision iterations. Higher alpha leads to smoother surfaces, and a vertex with alpha = 0 will not change from its initial position during the subdivision. Thus, alpha can be learnable to preserve sharp geometric features in contrast to the original Loop subdivision. For more details and example usage in learning, see `Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis`_ NeurIPS 2021. Args: vertices (torch.Tensor): batched vertices of triangle meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.LongTensor): unbatched triangle mesh faces, of shape :math:`(\text{num_faces}, 3)`. iterations (int): number of subdivision iterations. alpha (optional, torch.Tensor): batched per-vertex smoothing factor, alpha, of shape :math:`(\text{batch_size}, \text{num_vertices})`. Returns: (torch.Tensor, torch.LongTensor): - batched vertices of triangle meshes, of shape :math:`(\text{batch_size}, \text{new_num_vertices}, 3)`. - unbatched triangle mesh faces, of shape :math:`(\text{num_faces} \cdot 4^\text{iterations}, 3)`. Example: >>> vertices = torch.tensor([[[0, 0, 0], ... [1, 0, 0], ... [0, 1, 0], ... 
[0, 0, 1]]], dtype=torch.float) >>> faces = torch.tensor([[0, 1, 2],[0, 1, 3],[0, 2, 3],[1, 2, 3]], dtype=torch.long) >>> alpha = torch.tensor([[0, 0, 0, 0]], dtype=torch.float) >>> new_vertices, new_faces = subdivide_trianglemesh(vertices, faces, 1, alpha) >>> new_vertices tensor([[[0.0000, 0.0000, 0.0000], [1.0000, 0.0000, 0.0000], [0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 1.0000], [0.3750, 0.1250, 0.1250], [0.1250, 0.3750, 0.1250], [0.1250, 0.1250, 0.3750], [0.3750, 0.3750, 0.1250], [0.3750, 0.1250, 0.3750], [0.1250, 0.3750, 0.3750]]]) >>> new_faces tensor([[1, 7, 4], [0, 4, 5], [2, 5, 7], [5, 4, 7], [1, 8, 4], [0, 4, 6], [3, 6, 8], [6, 4, 8], [2, 9, 5], [0, 5, 6], [3, 6, 9], [6, 5, 9], [2, 9, 7], [1, 7, 8], [3, 8, 9], [8, 7, 9]]) .. _Smooth Subdivision Surfaces Based on Triangles: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/thesis-10.pdf .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis: https://arxiv.org/abs/2111.04276
Here is the function:
def subdivide_trianglemesh(vertices, faces, iterations, alpha=None):
    r"""Subdivide triangular meshes following the scheme of Loop subdivision proposed in
    `Smooth Subdivision Surfaces Based on Triangles`_.

    If the smoothing factor alpha is not given, this function performs exactly as Loop subdivision.
    Otherwise the vertex position is updated using the given per-vertex alpha value, which is
    differentiable and the alpha carries over to subsequent subdivision iterations. Higher alpha leads
    to smoother surfaces, and a vertex with alpha = 0 will not change from its initial position
    during the subdivision. Thus, alpha can be learnable to preserve sharp geometric features in contrast to
    the original Loop subdivision.

    For more details and example usage in learning, see `Deep Marching Tetrahedra\: a Hybrid
    Representation for High-Resolution 3D Shape Synthesis`_ NeurIPS 2021.

    Args:
        vertices (torch.Tensor): batched vertices of triangle meshes, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.LongTensor): unbatched triangle mesh faces, of shape
            :math:`(\text{num_faces}, 3)`.
        iterations (int): number of subdivision iterations.
        alpha (optional, torch.Tensor): batched per-vertex smoothing factor, alpha, of shape
            :math:`(\text{batch_size}, \text{num_vertices})`.

    Returns:
        (torch.Tensor, torch.LongTensor):
            - batched vertices of triangle meshes, of shape
              :math:`(\text{batch_size}, \text{new_num_vertices}, 3)`.
            - unbatched triangle mesh faces, of shape
              :math:`(\text{num_faces} \cdot 4^\text{iterations}, 3)`.

    Example:
        >>> vertices = torch.tensor([[[0, 0, 0],
        ...                           [1, 0, 0],
        ...                           [0, 1, 0],
        ...                           [0, 0, 1]]], dtype=torch.float)
        >>> faces = torch.tensor([[0, 1, 2],[0, 1, 3],[0, 2, 3],[1, 2, 3]], dtype=torch.long)
        >>> alpha = torch.tensor([[0, 0, 0, 0]], dtype=torch.float)
        >>> new_vertices, new_faces = subdivide_trianglemesh(vertices, faces, 1, alpha)
        >>> new_vertices
        tensor([[[0.0000, 0.0000, 0.0000],
                 [1.0000, 0.0000, 0.0000],
                 [0.0000, 1.0000, 0.0000],
                 [0.0000, 0.0000, 1.0000],
                 [0.3750, 0.1250, 0.1250],
                 [0.1250, 0.3750, 0.1250],
                 [0.1250, 0.1250, 0.3750],
                 [0.3750, 0.3750, 0.1250],
                 [0.3750, 0.1250, 0.3750],
                 [0.1250, 0.3750, 0.3750]]])
        >>> new_faces
        tensor([[1, 7, 4],
                [0, 4, 5],
                [2, 5, 7],
                [5, 4, 7],
                [1, 8, 4],
                [0, 4, 6],
                [3, 6, 8],
                [6, 4, 8],
                [2, 9, 5],
                [0, 5, 6],
                [3, 6, 9],
                [6, 5, 9],
                [2, 9, 7],
                [1, 7, 8],
                [3, 8, 9],
                [8, 7, 9]])

    .. _Smooth Subdivision Surfaces Based on Triangles:
        https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/thesis-10.pdf

    .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis:
        https://arxiv.org/abs/2111.04276
    """
    init_alpha = alpha
    for i in range(iterations):
        device = vertices.device
        b, v, f = vertices.shape[0], vertices.shape[1], faces.shape[0]
        # The 3 edges of every face, sorted so that an edge shared by two faces
        # maps to the same canonical (min, max) vertex pair.
        edges_fx3x2 = faces[:, [[0, 1], [1, 2], [2, 0]]]
        edges_fx3x2_sorted, _ = torch.sort(edges_fx3x2.reshape(edges_fx3x2.shape[0] * edges_fx3x2.shape[1], 2), -1)
        # For every edge occurrence, the index of the face it came from.
        all_edges_face_idx = torch.arange(edges_fx3x2.shape[0], device=device).unsqueeze(-1).expand(-1, 3).reshape(-1)
        # ``counts``: number of faces incident to each unique edge
        # (1 = boundary edge, 2 = interior edge, assuming a manifold mesh).
        edges_ex2, inverse_indices, counts = torch.unique(
            edges_fx3x2_sorted, dim=0, return_counts=True, return_inverse=True)
        # To compute updated vertex positions, first compute alpha for each vertex
        # TODO(cfujitsang): unify _get_adj_verts with adjacency_matrix
        adj_sparse = _get_adj_verts(edges_ex2, v)
        # ``n`` is the valence (number of neighbors) of each vertex.
        n = torch.sparse.sum(adj_sparse, 0).to_dense().view(-1, 1)
        if init_alpha is None:
            alpha = (_get_alpha(n) * n).unsqueeze(0)
        if alpha.dim() == 2:
            alpha = alpha.unsqueeze(-1)
        # Smooth each original vertex towards the mean of its neighbors,
        # weighted by its alpha (alpha = 0 keeps the vertex fixed).
        adj_verts_sum = torch.bmm(adj_sparse.unsqueeze(0), vertices)
        vertices_new = (1 - alpha) * vertices + alpha / n * adj_verts_sum
        e = edges_ex2.shape[0]
        edge_points = torch.zeros((b, e, 3), device=device)  # new point for every edge
        # Indices of the new (midpoint) vertices per face, offset past the
        # original ``v`` vertices.
        edges_fx3 = inverse_indices.reshape(f, 3) + v
        alpha_points = torch.zeros((b, e, 1), device=device)
        mask_e = (counts == 2)
        # edge points on boundary is computed as midpoint
        if torch.sum(~mask_e) > 0:
            edge_points[:, ~mask_e] += torch.mean(vertices[:,
                                                           edges_ex2[~mask_e].reshape(-1), :].reshape(b, -1, 2, 3), 2)
            alpha_points[:, ~mask_e] += torch.mean(alpha[:, edges_ex2[~mask_e].reshape(-1), :].reshape(b, -1, 2, 1), 2)
        # Pair up the two faces sharing each interior edge by sorting the edge
        # occurrences by their unique-edge id.
        counts_f = counts[inverse_indices]
        mask_f = (counts_f == 2)
        group = inverse_indices[mask_f]
        _, indices = torch.sort(group)
        edges_grouped = all_edges_face_idx[mask_f][indices]
        edges_face_idx = torch.stack([edges_grouped[::2], edges_grouped[1::2]], dim=-1)
        e_ = edges_face_idx.shape[0]
        edges_face = faces[edges_face_idx.reshape(-1), :].reshape(-1, 2, 3)
        # Interior edge point: mean of the 6 vertices of the two incident faces
        # plus the 2 edge endpoints -- endpoints appear 3 times out of 8, giving
        # the Loop weights 3/8, 3/8, 1/8, 1/8.
        edges_vert = vertices[:, edges_face.reshape(-1), :].reshape(b, e_, 6, 3)
        edges_vert = torch.cat([edges_vert, vertices[:, edges_ex2[mask_e].reshape(-1),
                                                     :].reshape(b, -1, 2, 3)], 2).mean(2)
        alpha_vert = alpha[:, edges_face.reshape(-1), :].reshape(b, e_, 6, 1)
        alpha_vert = torch.cat([alpha_vert, alpha[:, edges_ex2[mask_e].reshape(-1),
                                                  :].reshape(b, -1, 2, 1)], 2).mean(2)
        edge_points[:, mask_e] += edges_vert
        alpha_points[:, mask_e] += alpha_vert
        # Carry alpha over to the new vertices so it applies on later iterations.
        alpha = torch.cat([alpha, alpha_points], 1)
        vertices = torch.cat([vertices_new, edge_points], 1)
        # 1-to-4 split: each face becomes 3 corner faces + 1 central face.
        faces = torch.cat([faces, edges_fx3], 1)
        faces = faces[:, [[1, 4, 3], [0, 3, 5], [2, 5, 4], [5, 3, 4]]].reshape(-1, 3)
    return vertices, faces
[0, 0, 1]]], dtype=torch.float) >>> faces = torch.tensor([[0, 1, 2],[0, 1, 3],[0, 2, 3],[1, 2, 3]], dtype=torch.long) >>> alpha = torch.tensor([[0, 0, 0, 0]], dtype=torch.float) >>> new_vertices, new_faces = subdivide_trianglemesh(vertices, faces, 1, alpha) >>> new_vertices tensor([[[0.0000, 0.0000, 0.0000], [1.0000, 0.0000, 0.0000], [0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 1.0000], [0.3750, 0.1250, 0.1250], [0.1250, 0.3750, 0.1250], [0.1250, 0.1250, 0.3750], [0.3750, 0.3750, 0.1250], [0.3750, 0.1250, 0.3750], [0.1250, 0.3750, 0.3750]]]) >>> new_faces tensor([[1, 7, 4], [0, 4, 5], [2, 5, 7], [5, 4, 7], [1, 8, 4], [0, 4, 6], [3, 6, 8], [6, 4, 8], [2, 9, 5], [0, 5, 6], [3, 6, 9], [6, 5, 9], [2, 9, 7], [1, 7, 8], [3, 8, 9], [8, 7, 9]]) .. _Smooth Subdivision Surfaces Based on Triangles: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/thesis-10.pdf .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis: https://arxiv.org/abs/2111.04276 |
4,671 | import math
import torch
from ..batch import tile_to_packed, packed_to_padded, get_first_idx
The provided code snippet includes necessary dependencies for implementing the `vertex_tangents` function. Write a Python function `def vertex_tangents(faces, face_vertices, face_uvs, vertex_normals)` to solve the following problem:
r"""Compute vertex tangents. The vertex tangents are useful to apply normal maps during rendering. .. seealso:: https://en.wikipedia.org/wiki/Normal_mapping#Calculating_tangent_space Args: faces (torch.LongTensor): unbatched triangle mesh faces, of shape :math:`(\text{num_faces}, 3)`. face_vertices (torch.Tensor): unbatched triangle face vertices, of shape :math:`(\text{num_faces}, 3, 3)`. face_uvs (torch.Tensor): unbatched triangle UVs, of shape :math:`(\text{num_faces}, 3, 2)`. vertex_normals (torch.Tensor): unbatched vertex normals, of shape :math:`(\text{num_vertices}, 3)`. Returns: (torch.Tensor): The vertex tangents, of shape :math:`(\text{num_vertices, 3})`
Here is the function:
def vertex_tangents(faces, face_vertices, face_uvs, vertex_normals):
    r"""Compute vertex tangents.

    The vertex tangents are useful to apply normal maps during rendering.

    .. seealso::
        https://en.wikipedia.org/wiki/Normal_mapping#Calculating_tangent_space

    Args:
        faces (torch.LongTensor): unbatched triangle mesh faces, of shape
            :math:`(\text{num_faces}, 3)`.
        face_vertices (torch.Tensor): unbatched triangle face vertices, of shape
            :math:`(\text{num_faces}, 3, 3)`.
        face_uvs (torch.Tensor): unbatched triangle UVs, of shape
            :math:`(\text{num_faces}, 3, 2)`.
        vertex_normals (torch.Tensor): unbatched vertex normals, of shape
            :math:`(\text{num_vertices}, 3)`.

    Returns:
        (torch.Tensor): The vertex tangents, of shape :math:`(\text{num_vertices, 3})`
    """
    # This function is strongly inspired by
    # https://github.com/NVlabs/nvdiffrec/blob/main/render/mesh.py#L203
    tangents = torch.zeros_like(vertex_normals)
    face_uvs0, face_uvs1, face_uvs2 = torch.split(face_uvs, 1, dim=-2)
    fv0, fv1, fv2 = torch.split(face_vertices, 1, dim=-2)
    # Edge deltas of each triangle, in UV space (uve*) and 3D space (pe*),
    # both rooted at corner 0.
    uve1 = face_uvs1 - face_uvs0
    uve2 = face_uvs2 - face_uvs0
    pe1 = (fv1 - fv0).squeeze(-2)
    pe2 = (fv2 - fv0).squeeze(-2)
    # Per-face tangent from the standard tangent-space system;
    # ``denom`` is the 2D cross product of the UV edges (twice the signed
    # area of the UV triangle).
    nom = pe1 * uve2[..., 1] - pe2 * uve1[..., 1]
    denom = uve1[..., 0] * uve2[..., 1] - uve1[..., 1] * uve2[..., 0]
    # Avoid division by zero for degenerated texture coordinates
    tang = nom / torch.where(
        denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)
    )
    vn_idx = torch.split(faces, 1, dim=-1)
    # NOTE(review): when face_vertices carries a leading batch dim (ndim != 3)
    # the scatter goes along dim 1 -- presumably batched inputs; confirm
    # against callers.
    indexing_dim = 0 if face_vertices.ndim == 3 else 1
    # TODO(cfujitsang): optimizable?
    # Accumulate each face's tangent onto its three vertices.
    for i in range(3):
        idx = vn_idx[i].repeat(1, 3)
        tangents.scatter_add_(indexing_dim, idx, tang)
    # Normalize and make sure tangent is perpendicular to normal
    tangents = torch.nn.functional.normalize(tangents, dim=1)
    # Gram-Schmidt step: remove the component along the vertex normal.
    tangents = torch.nn.functional.normalize(
        tangents -
        torch.sum(tangents * vertex_normals, dim=-1, keepdim=True) *
        vertex_normals
    )
    if torch.is_anomaly_enabled():
        assert torch.all(torch.isfinite(tangents))
    return tangents
4,672 | import torch
from kaolin.ops.conversions.tetmesh import _sort_edges
def _validate_tet_vertices(tet_vertices):
r"""Helper method to validate the dimensions of the batched tetrahedrons tensor.
Args:
tet_vertices (torch.Tensor):
Batched tetrahedrons, of shape
:math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.
"""
assert tet_vertices.ndim == 4, \
f"tetrahedrons has {tetrahedrons.ndim} but must have 4 dimensions."
assert tet_vertices.shape[2] == 4, \
f"The third dimension of the tetrahedrons must be 4 " \
f"but the input has {tetrahedrons.shape[2]}. Each tetrahedron has 4 vertices."
assert tet_vertices.shape[3] == 3, \
f"The fourth dimension of the tetrahedrons must be 3 " \
f"but the input has {tetrahedrons.shape[3]}. Each vertex must have 3 dimensions."
The provided code snippet includes necessary dependencies for implementing the `inverse_vertices_offset` function. Write a Python function `def inverse_vertices_offset(tet_vertices)` to solve the following problem:
r"""Given tetrahedrons with 4 vertices A, B, C, D. Compute the inverse of the offset matrix w.r.t. vertex A for each tetrahedron. The offset matrix is obtained by the concatenation of :math:`B - A`, :math:`C - A` and :math:`D - A`. The resulting shape of the offset matrix is :math:`(\text{batch_size}, \text{num_tetrahedrons}, 3, 3)`. The inverse of the offset matrix is computed by this function. Args: tet_vertices (torch.Tensor): Batched tetrahedrons, of shape :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`. Returns: (torch.Tensor): Batched inverse offset matrix, of shape :math:`(\text{batch_size}, \text{num_tetrahedrons}, 3, 3)`. Each offset matrix is of shape :math:`(3, 3)`, hence its inverse is also of shape :math:`(3, 3)`. Example: >>> tet_vertices = torch.tensor([[[[-0.0500, 0.0000, 0.0500], ... [-0.0250, -0.0500, 0.0000], ... [ 0.0000, 0.0000, 0.0500], ... [0.5000, 0.5000, 0.4500]]]]) >>> inverse_vertices_offset(tet_vertices) tensor([[[[ 0.0000, 20.0000, 0.0000], [ 79.9999, -149.9999, 10.0000], [ -99.9999, 159.9998, -10.0000]]]])
Here is the function:
def inverse_vertices_offset(tet_vertices):
    r"""Compute the inverse of the per-tetrahedron offset matrix.

    Given tetrahedrons with 4 vertices A, B, C, D, the offset matrix w.r.t.
    vertex A is the concatenation of :math:`B - A`, :math:`C - A` and
    :math:`D - A`, of shape
    :math:`(\text{batch_size}, \text{num_tetrahedrons}, 3, 3)`.
    This function returns the inverse of that matrix.

    Args:
        tet_vertices (torch.Tensor):
            Batched tetrahedrons, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons}, 4, 3)`.

    Returns:
        (torch.Tensor):
            Batched inverse offset matrix, of shape
            :math:`(\text{batch_size}, \text{num_tetrahedrons}, 3, 3)`.
            Each offset matrix is of shape :math:`(3, 3)`,
            hence its inverse is also of shape :math:`(3, 3)`.

    Example:
        >>> tet_vertices = torch.tensor([[[[-0.0500, 0.0000, 0.0500],
        ...                                [-0.0250, -0.0500, 0.0000],
        ...                                [ 0.0000, 0.0000, 0.0500],
        ...                                [0.5000, 0.5000, 0.4500]]]])
        >>> inverse_vertices_offset(tet_vertices)
        tensor([[[[ 0.0000, 20.0000, 0.0000],
                  [ 79.9999, -149.9999, 10.0000],
                  [ -99.9999, 159.9998, -10.0000]]]])
    """
    _validate_tet_vertices(tet_vertices)
    # Broadcasting B, C, D against A replaces the original split/cat sequence:
    # rows of the offset matrix are the edge vectors from A.
    vertex_a = tet_vertices[:, :, :1]
    offset_matrix = tet_vertices[:, :, 1:] - vertex_a
    return torch.inverse(offset_matrix)
4,673 | import torch
from kaolin.ops.conversions.tetmesh import _sort_edges
base_tet_edges = torch.tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long)
def _sort_edges(edges):
"""sort last dimension of edges of shape (E, 2)"""
with torch.no_grad():
order = (edges[:, 0] > edges[:, 1]).long()
order = order.unsqueeze(dim=1)
a = torch.gather(input=edges, index=order, dim=1)
b = torch.gather(input=edges, index=1 - order, dim=1)
return torch.stack([a, b], -1)
The provided code snippet includes necessary dependencies for implementing the `subdivide_tetmesh` function. Write a Python function `def subdivide_tetmesh(vertices, tetrahedrons, features=None)` to solve the following problem:
r"""Subdivide each tetrahedron in tetmesh into 8 smaller tetrahedrons by adding midpoints. If per-vertex features (e.g. SDF value) are given, the features of the new vertices are computed by averaging the features of vertices on the edge. For more details and example usage in learning, see `Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis`_ NeurIPS 2021. Args: vertices (torch.Tensor): batched vertices of tetrahedral meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. tetrahedrons (torch.LongTensor): unbatched tetrahedral mesh topology, of shape :math:`(\text{num_tetrahedrons}, 4)`. features (optional, torch.Tensor): batched per-vertex feature vectors, of shape :math:`(\text{batch_size}, \text{num_vertices}, \text{feature_dim})`. Returns: (torch.Tensor, torch.LongTensor, (optional) torch.Tensor): - batched vertices of subdivided tetrahedral meshes, of shape :math:`(\text{batch_size}, \text{new_num_vertices}, 3)` - unbatched tetrahedral mesh topology, of shape :math:`(\text{num_tetrahedrons} * 8, 4)`. - batched per-vertex feature vectors of subdivided tetrahedral meshes, of shape :math:`(\text{batch_size}, \text{new_num_vertices}, \text{feature_dim})`. Example: >>> vertices = torch.tensor([[[0, 0, 0], ... [1, 0, 0], ... [0, 1, 0], ... 
[0, 0, 1]]], dtype=torch.float) >>> tetrahedrons = torch.tensor([[0, 1, 2, 3]], dtype=torch.long) >>> sdf = torch.tensor([[[-1.], [-1.], [0.5], [0.5]]], dtype=torch.float) >>> new_vertices, new_tetrahedrons, new_sdf = subdivide_tetmesh(vertices, tetrahedrons, sdf) >>> new_vertices tensor([[[0.0000, 0.0000, 0.0000], [1.0000, 0.0000, 0.0000], [0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 1.0000], [0.5000, 0.0000, 0.0000], [0.0000, 0.5000, 0.0000], [0.0000, 0.0000, 0.5000], [0.5000, 0.5000, 0.0000], [0.5000, 0.0000, 0.5000], [0.0000, 0.5000, 0.5000]]]) >>> new_tetrahedrons tensor([[0, 4, 5, 6], [1, 7, 4, 8], [2, 5, 7, 9], [3, 6, 9, 8], [4, 5, 6, 8], [4, 5, 8, 7], [9, 5, 8, 6], [9, 5, 7, 8]]) >>> new_sdf tensor([[[-1.0000], [-1.0000], [ 0.5000], [ 0.5000], [-1.0000], [-0.2500], [-0.2500], [-0.2500], [-0.2500], [ 0.5000]]]) .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis: https://arxiv.org/abs/2111.04276
Here is the function:
def subdivide_tetmesh(vertices, tetrahedrons, features=None):
    r"""Subdivide each tetrahedron in tetmesh into 8 smaller tetrahedrons
    by adding midpoints. If per-vertex features (e.g. SDF value) are given, the features
    of the new vertices are computed by averaging the features of vertices on the edge.

    For more details and example usage in learning, see
    `Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis`_ NeurIPS 2021.

    Args:
        vertices (torch.Tensor): batched vertices of tetrahedral meshes, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        tetrahedrons (torch.LongTensor): unbatched tetrahedral mesh topology, of shape
            :math:`(\text{num_tetrahedrons}, 4)`.
        features (optional, torch.Tensor): batched per-vertex feature vectors, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, \text{feature_dim})`.

    Returns:
        (torch.Tensor, torch.LongTensor, (optional) torch.Tensor):
            - batched vertices of subdivided tetrahedral meshes, of shape
              :math:`(\text{batch_size}, \text{new_num_vertices}, 3)`
            - unbatched tetrahedral mesh topology, of shape
              :math:`(\text{num_tetrahedrons} * 8, 4)`.
            - batched per-vertex feature vectors of subdivided tetrahedral meshes, of shape
              :math:`(\text{batch_size}, \text{new_num_vertices}, \text{feature_dim})`.

    Example:
        >>> vertices = torch.tensor([[[0, 0, 0],
        ...                           [1, 0, 0],
        ...                           [0, 1, 0],
        ...                           [0, 0, 1]]], dtype=torch.float)
        >>> tetrahedrons = torch.tensor([[0, 1, 2, 3]], dtype=torch.long)
        >>> sdf = torch.tensor([[[-1.], [-1.], [0.5], [0.5]]], dtype=torch.float)
        >>> new_vertices, new_tetrahedrons, new_sdf = subdivide_tetmesh(vertices, tetrahedrons, sdf)
        >>> new_vertices
        tensor([[[0.0000, 0.0000, 0.0000],
                 [1.0000, 0.0000, 0.0000],
                 [0.0000, 1.0000, 0.0000],
                 [0.0000, 0.0000, 1.0000],
                 [0.5000, 0.0000, 0.0000],
                 [0.0000, 0.5000, 0.0000],
                 [0.0000, 0.0000, 0.5000],
                 [0.5000, 0.5000, 0.0000],
                 [0.5000, 0.0000, 0.5000],
                 [0.0000, 0.5000, 0.5000]]])
        >>> new_tetrahedrons
        tensor([[0, 4, 5, 6],
                [1, 7, 4, 8],
                [2, 5, 7, 9],
                [3, 6, 9, 8],
                [4, 5, 6, 8],
                [4, 5, 8, 7],
                [9, 5, 8, 6],
                [9, 5, 7, 8]])
        >>> new_sdf
        tensor([[[-1.0000],
                 [-1.0000],
                 [ 0.5000],
                 [ 0.5000],
                 [-1.0000],
                 [-0.2500],
                 [-0.2500],
                 [-0.2500],
                 [-0.2500],
                 [ 0.5000]]])

    .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis:
        https://arxiv.org/abs/2111.04276
    """
    device = vertices.device
    # All 6 edges of every tetrahedron, canonically sorted so edges shared
    # between tetrahedrons compare equal.
    all_edges = tetrahedrons[:, base_tet_edges].reshape(-1, 2)
    all_edges = _sort_edges(all_edges)
    # ``idx_map`` maps every edge occurrence to the index of its midpoint
    # vertex, appended after the ``vertices.shape[1]`` original vertices.
    unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)
    idx_map = idx_map + vertices.shape[1]
    # Concatenate features to positions so one mean computes both midpoints.
    pos_feature = torch.cat([vertices, features], -1) if (features is not None) else vertices
    mid_pos_feature = pos_feature[:, unique_edges.reshape(-1)].reshape(
        pos_feature.shape[0], -1, 2, pos_feature.shape[-1]).mean(2)
    new_pos_feature = torch.cat([pos_feature, mid_pos_feature], 1)
    new_pos, new_features = new_pos_feature[..., :3], new_pos_feature[..., 3:]
    idx_a, idx_b, idx_c, idx_d = torch.split(tetrahedrons, 1, -1)
    idx_ab, idx_ac, idx_ad, idx_bc, idx_bd, idx_cd = idx_map.reshape(-1, 6).split(1, -1)
    # 1-to-8 split: 4 corner tetrahedrons (tet_1..tet_4, one per original
    # vertex) plus 4 tetrahedrons filling the inner octahedron (tet_5..tet_8).
    # The vertex orderings below are kept as-is deliberately.
    tet_1 = torch.stack([idx_a, idx_ab, idx_ac, idx_ad], dim=1)
    tet_2 = torch.stack([idx_b, idx_bc, idx_ab, idx_bd], dim=1)
    tet_3 = torch.stack([idx_c, idx_ac, idx_bc, idx_cd], dim=1)
    tet_4 = torch.stack([idx_d, idx_ad, idx_cd, idx_bd], dim=1)
    tet_5 = torch.stack([idx_ab, idx_ac, idx_ad, idx_bd], dim=1)
    tet_6 = torch.stack([idx_ab, idx_ac, idx_bd, idx_bc], dim=1)
    tet_7 = torch.stack([idx_cd, idx_ac, idx_bd, idx_ad], dim=1)
    tet_8 = torch.stack([idx_cd, idx_ac, idx_bc, idx_bd], dim=1)
    new_tetrahedrons = torch.cat([tet_1, tet_2, tet_3, tet_4, tet_5, tet_6, tet_7, tet_8], dim=0).squeeze(-1)
    return (new_pos, new_tetrahedrons) if features is None else (new_pos, new_tetrahedrons, new_features)
[0, 0, 1]]], dtype=torch.float) >>> tetrahedrons = torch.tensor([[0, 1, 2, 3]], dtype=torch.long) >>> sdf = torch.tensor([[[-1.], [-1.], [0.5], [0.5]]], dtype=torch.float) >>> new_vertices, new_tetrahedrons, new_sdf = subdivide_tetmesh(vertices, tetrahedrons, sdf) >>> new_vertices tensor([[[0.0000, 0.0000, 0.0000], [1.0000, 0.0000, 0.0000], [0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 1.0000], [0.5000, 0.0000, 0.0000], [0.0000, 0.5000, 0.0000], [0.0000, 0.0000, 0.5000], [0.5000, 0.5000, 0.0000], [0.5000, 0.0000, 0.5000], [0.0000, 0.5000, 0.5000]]]) >>> new_tetrahedrons tensor([[0, 4, 5, 6], [1, 7, 4, 8], [2, 5, 7, 9], [3, 6, 9, 8], [4, 5, 6, 8], [4, 5, 8, 7], [9, 5, 8, 6], [9, 5, 7, 8]]) >>> new_sdf tensor([[[-1.0000], [-1.0000], [ 0.5000], [ 0.5000], [-1.0000], [-0.2500], [-0.2500], [-0.2500], [-0.2500], [ 0.5000]]]) .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis: https://arxiv.org/abs/2111.04276 |
4,674 | from __future__ import annotations
import torch
The provided code snippet includes necessary dependencies for implementing the `center_points` function. Write a Python function `def center_points(points: torch.FloatTensor, normalize: bool = False, eps=1e-6)` to solve the following problem:
r"""Returns points centered at the origin for every pointcloud. If `normalize` is set, will also normalize each point cloud separately to the range of [-0.5, 0.5]. Note that each point cloud is centered individually. Args: points (torch.FloatTensor): point clouds of shape :math:`(\text{batch_size}, \text{num_points}, 3)`, (other channel numbers supported). normalize (bool): if true, will also normalize each point cloud to be in the range [-0.5, 0.5] eps (float): eps to use to avoid division by zero when normalizing Return: (torch.FloatTensor) modified points with same shape, device and dtype as input
Here is the function:
def center_points(points: torch.FloatTensor, normalize: bool = False, eps=1e-6):
    r"""Center each point cloud in the batch at the origin.

    If `normalize` is set, will also normalize each point cloud separately
    to the range of [-0.5, 0.5]. Note that each point cloud is centered
    individually.

    Args:
        points (torch.FloatTensor): point clouds of shape :math:`(\text{batch_size}, \text{num_points}, 3)`,
            (other channel numbers supported).
        normalize (bool): if true, will also normalize each point cloud to be in the range [-0.5, 0.5]
        eps (float): eps to use to avoid division by zero when normalizing

    Return:
        (torch.FloatTensor) modified points with same shape, device and dtype as input
    """
    assert points.dim() == 3, f'Points have unexpected shape {points.shape}'
    # Per-cloud axis-aligned bounding box (reduce over the points dimension).
    lo = points.min(dim=1, keepdim=True)[0]
    hi = points.max(dim=1, keepdim=True)[0]
    centered = points - (lo + hi) * 0.5
    if not normalize:
        return centered
    # Scale by the largest bbox extent so values land in [-0.5, 0.5].
    extent = (hi - lo).max(dim=-1, keepdim=True)[0].clip(min=eps)
    return centered / extent
4,675 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `sparse_bmm` function. Write a Python function `def sparse_bmm(sparse_matrix, dense_matrix_batch)` to solve the following problem:
r"""Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix. Args: sparse_matrix (torch.sparse.FloatTensor): Input sparse matrix, of shape :math:`(\text{M}, \text{N})`. dense_matrix_batch (torch.FloatTensor): Input batched dense matrix, of shape :math:`(\text{batch_size}, \text{N}, \text{P})`. Returns: (torch.FloatTensor): Result of the batched matrix multiplication, of shape, :math:`(\text{batch_size}, \text{N}, \text{P})`.
Here is the function:
def sparse_bmm(sparse_matrix, dense_matrix_batch):
    r"""Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix.

    Equivalent to multiplying every batch element of ``dense_matrix_batch``
    by ``sparse_matrix``, but implemented with a single
    :func:`torch.sparse.mm` call by folding the batch into columns.

    Args:
        sparse_matrix (torch.sparse.FloatTensor):
            Input sparse matrix, of shape :math:`(\text{M}, \text{N})`.
        dense_matrix_batch (torch.FloatTensor):
            Input batched dense matrix, of shape
            :math:`(\text{batch_size}, \text{N}, \text{P})`.
    Returns:
        (torch.FloatTensor):
            Result of the batched matrix multiplication, of shape
            :math:`(\text{batch_size}, \text{M}, \text{P})`.
    """
    m = sparse_matrix.shape[0]
    b, n, p = dense_matrix_batch.shape
    # Stack the matrix batch into columns. (b, n, p) -> (n, b * p)
    dense_matrix = dense_matrix_batch.transpose(0, 1).reshape(n, b * p)
    result = torch.sparse.mm(sparse_matrix, dense_matrix)
    # Reverse the reshaping. (m, b * p) -> (b, m, p)
    return result.reshape(m, b, p).transpose(0, 1)
4,676 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `normalize_adj` function. Write a Python function `def normalize_adj(adj)` to solve the following problem:
r"""Normalize the adjacency matrix such that the sum of each row is 1. This operation is slow, so it should be done only once for a graph and then reused. This supports both sparse tensor and regular tensor. The return type will be the same as the input type. For example, if the input is a sparse tensor, the normalized matrix will also be a sparse tensor. Args: adj (torch.sparse.FloatTensor or torch.FloatTensor): Input adjacency matrix, of shape :math:`(\text{num_nodes}, \text{num_nodes})`. Returns: (torch.sparse.FloatTensor or torch.FloatTensor): A new adjacency matrix with the same connectivity as the input, but with the sum of each row normalized to 1.
Here is the function:
def normalize_adj(adj):
    r"""Normalize the adjacency matrix such that the sum of each row is 1.

    This operation is slow, so it should be done only once for a graph and then
    reused.

    This supports both sparse tensor and regular tensor. The return type will be
    the same as the input type. For example, if the input is a sparse tensor,
    the normalized matrix will also be a sparse tensor.

    Args:
        adj (torch.sparse.FloatTensor or torch.FloatTensor):
            Input adjacency matrix, of shape :math:`(\text{num_nodes}, \text{num_nodes})`.
    Returns:
        (torch.sparse.FloatTensor or torch.FloatTensor):
            A new adjacency matrix with the same connectivity as the input, but
            with the sum of each row normalized to 1.

    Note:
        Rows whose sum is zero divide by zero and yield non-finite values,
        matching the previous behavior.
    """
    # Match the input dtype so non-float32 adjacency matrices also work.
    ones = torch.ones((adj.shape[0], 1), device=adj.device, dtype=adj.dtype)
    # `is_sparse` is more robust than matching on the legacy type() string,
    # which only recognized float32 sparse tensors.
    if adj.is_sparse:
        # Row sums via a sparse matrix-vector product.
        norm = torch.sparse.mm(adj, ones).squeeze(1)
        indices = adj._indices()
        # Divide each stored value by the sum of its row.
        values = adj._values() / norm.gather(dim=0, index=indices[0, :])
        # torch.sparse_coo_tensor replaces the deprecated
        # torch.sparse.FloatTensor constructor and infers device/dtype.
        return torch.sparse_coo_tensor(indices, values, adj.shape)
    else:
        norm = torch.matmul(adj, ones)
        return adj / norm
4,677 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
import random
The provided code snippet includes necessary dependencies for implementing the `manual_seed` function. Write a Python function `def manual_seed(torch_seed, random_seed=None, numpy_seed=None)` to solve the following problem:
Set the seed for random and torch modules. Args: torch_seed (int): The desired seed for torch module. random_seed (int): The desired seed for random module. Default: ``torch_seed`` value. numpy_seed (int): The desired seed for numpy module. Default: ``torch_seed`` value.
Here is the function:
def manual_seed(torch_seed, random_seed=None, numpy_seed=None):
    """Set the seed for random, torch and numpy modules.

    Args:
        torch_seed (int): The desired seed for torch module.
        random_seed (int): The desired seed for random module. Default: ``torch_seed`` value.
        numpy_seed (int): The desired seed for numpy module. Default: ``torch_seed`` value.
    """
    # Unspecified seeds fall back to the torch seed.
    torch.manual_seed(torch_seed)
    random.seed(torch_seed if random_seed is None else random_seed)
    np.random.seed(torch_seed if numpy_seed is None else numpy_seed)
4,678 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
import random
The provided code snippet includes necessary dependencies for implementing the `set_state` function. Write a Python function `def set_state(torch_state, random_state, numpy_state)` to solve the following problem:
Set the generator states for generating random numbers. Mostly used in pair with :func:`get_state` Args: torch_state (torch.ByteTensor): the state of torch module. random_state (tuple): the state of random module. numpy_state (tuple): the state of numpy module. Example: >>> torch_state, random_state, numpy_state = get_state() >>> s = torch.randn((1, 3)) >>> set_state(torch_state, random_state, numpy_state)
Here is the function:
def set_state(torch_state, random_state, numpy_state):
    """Restore the generator states for generating random numbers.

    Mostly used in pair with :func:`get_state`.

    Args:
        torch_state (torch.ByteTensor): the state of torch module.
        random_state (tuple): the state of random module.
        numpy_state (tuple): the state of numpy module.

    Example:
        >>> torch_state, random_state, numpy_state = get_state()
        >>> s = torch.randn((1, 3))
        >>> set_state(torch_state, random_state, numpy_state)
    """
    # The three generators are independent; restore each in turn.
    np.random.set_state(numpy_state)
    random.setstate(random_state)
    torch.set_rng_state(torch_state)
4,679 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
import random
The provided code snippet includes necessary dependencies for implementing the `get_state` function. Write a Python function `def get_state()` to solve the following problem:
Returns the generator states for generating random numbers. Mostly used in pair with :func:`set_state`. See also: * https://pytorch.org/docs/stable/generated/torch.get_rng_state.html#torch.get_rng_state * https://docs.python.org/3/library/random.html#random.getstate * https://numpy.org/doc/stable/reference/random/generated/numpy.random.set_state.html#numpy.random.set_state Returns: (torch.ByteTensor, tuple, tuple): the states for the corresponding modules (torch, random, numpy). Example: >>> torch_state, random_state, numpy_state = get_state() >>> s = torch.randn((1, 3)) >>> set_state(torch_state, random_state, numpy_state)
Here is the function:
def get_state():
    """Return the generator states for generating random numbers.

    Mostly used in pair with :func:`set_state`.

    See also:
        * https://pytorch.org/docs/stable/generated/torch.get_rng_state.html#torch.get_rng_state
        * https://docs.python.org/3/library/random.html#random.getstate
        * https://numpy.org/doc/stable/reference/random/generated/numpy.random.set_state.html#numpy.random.set_state

    Returns:
        (torch.ByteTensor, tuple, tuple):
            the states for the corresponding modules (torch, random, numpy).

    Example:
        >>> torch_state, random_state, numpy_state = get_state()
        >>> s = torch.randn((1, 3))
        >>> set_state(torch_state, random_state, numpy_state)
    """
    torch_state = torch.get_rng_state()
    random_state = random.getstate()
    numpy_state = np.random.get_state()
    return torch_state, random_state, numpy_state
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.