| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable) |
|---|---|---|
6,008 | import os
from pyUltroid import ULTConfig
import qrcode
from PIL import Image
from telethon.tl.types import MessageMediaDocument as doc
from . import check_filename, get_string, ultroid_bot, ultroid_cmd
import qrcode
async def qrwater(e):
    """Overlay a QR code (encoding the given text) onto replied media.

    Downloads the replied media (or its thumbnail for documents), pastes a
    QR code of the command argument into its bottom-right corner, and sends
    the result back, streaming-enabled.
    """
    msg = e.pattern_match.group(1).strip()
    r = await e.get_reply_message()
    # Fix: guard before touching r.media — without a reply, r is None and
    # the isinstance check below raised AttributeError.
    if not (r and r.media):
        return await e.eor("`Reply Any Media and Give Text`", time=5)
    dl = await e.client.download_media(
        r, thumb=-1 if isinstance(r.media, doc) else None
    )
    if not dl:
        return await e.eor("`Reply Any Media and Give Text`", time=5)
    kk = await e.eor(get_string("com_1"))
    img_bg = Image.open(dl)
    qr = qrcode.QRCode(box_size=5)
    qr.add_data(msg)
    qr.make()
    img_qr = qr.make_image()
    # Anchor the QR at the bottom-right corner of the base image.
    pos = (img_bg.size[0] - img_qr.size[0], img_bg.size[1] - img_qr.size[1])
    img_bg.paste(img_qr, pos)
    img_bg.save(dl)
    await e.client.send_file(e.chat_id, dl, supports_streaming=True)
    await kk.delete()
    os.remove(dl)
6,009 | import os
from pyUltroid import ULTConfig
try:
import cv2
except ImportError:
cv2 = None
import qrcode
from PIL import Image
from telethon.tl.types import MessageMediaDocument as doc
from . import check_filename, get_string, ultroid_bot, ultroid_cmd
async def decod(e):
    """Decode the QR code in the replied media and show its text.

    Uses OpenCV's QRCodeDetector; fails gracefully when OpenCV is missing
    (the module-level import is wrapped in try/except and sets cv2 = None).
    """
    r = await e.get_reply_message()
    if not (r and r.media):
        return await e.eor("`Reply to Qrcode Media`", time=5)
    # Fix: cv2 can be None when the guarded import above failed; the old
    # code crashed with AttributeError at cv2.imread.
    if cv2 is None:
        return await e.eor("`opencv-python is not installed!`", time=5)
    kk = await e.eor(get_string("com_1"))
    dl = await e.client.download_media(
        r, thumb=-1 if isinstance(r.media, doc) else None
    )
    if not dl:
        return
    im = cv2.imread(dl)
    try:
        det = cv2.QRCodeDetector()
        # detectAndDecode returns (text, points, straight_qr); only the
        # decoded text is needed here.
        tx, _, _ = det.detectAndDecode(im)
        await kk.edit("**Decoded Text:\n\n**" + tx)
    except BaseException:
        await kk.edit("`Reply To Media in Which Qr image present.`")
    os.remove(dl)
6,010 | from telethon.errors.rpcerrorlist import (
BotInlineDisabledError,
BotMethodInvalidError,
BotResponseTimeoutError,
)
from telethon.tl.custom import Button
from pyUltroid.dB._core import HELP, LIST
from pyUltroid.fns.tools import cmd_regex_replace
from . import HNDLR, LOGS, OWNER_NAME, asst, get_string, inline_pic, udB, ultroid_cmd
# Inline keyboard for the main help menu: the two plugin-category browsers,
# the VC-bot category, an inline-mode shortcut, owner info, a settings
# deep-link into the assistant bot, and a close button.
_main_help_menu = [
    [
        Button.inline(get_string("help_4"), data="uh_Official_"),
        Button.inline(get_string("help_5"), data="uh_Addons_"),
    ],
    [
        Button.inline(get_string("help_6"), data="uh_VCBot_"),
        Button.inline(get_string("help_7"), data="inlone"),
    ],
    [
        Button.inline(get_string("help_8"), data="ownr"),
        Button.url(
            get_string("help_9"), url=f"https://t.me/{asst.me.username}?start=set"
        ),
    ],
    [Button.inline(get_string("help_10"), data="close")],
]
# NOTE(review): these rebind the HELP/LIST dicts imported from
# pyUltroid.dB._core above, and the star-import then re-pulls sibling
# package names — confirm this shadowing is intentional and not an
# extraction artifact.
HELP = {}
LIST = {}
from .. import *
def cmd_regex_replace(cmd):
    """Strip regex syntax from a command pattern, leaving the bare command text."""
    # Fragments are stripped in this exact order so that longer pieces
    # (e.g. "?(.*)") are consumed whole before "(" / ")" are removed.
    fragments = (
        "$",
        "?(.*)",
        "(.*)",
        "(?: |)",
        "| ",
        "( |)",
        "?((.|//)*)",
        "?P<shortname>\\w+",
        "(",
        ")",
        "?(\\d+)",
    )
    for fragment in fragments:
        cmd = cmd.replace(fragment, "")
    return cmd
async def _help(ult):
    """Handle the `help [plugin|command]` command.

    With an argument: show that plugin's help text, or search every plugin's
    command list for a matching command. Without an argument: open the inline
    main help menu via the assistant bot, falling back to a plain button
    message when inline mode is unavailable.
    """
    plug = ult.pattern_match.group(1).strip()
    chat = await ult.get_chat()
    if plug:
        try:
            # Direct plugin-name hit in one of the three help categories.
            if plug in HELP["Official"]:
                output = f"**Plugin** - `{plug}`\n"
                for i in HELP["Official"][plug]:
                    output += i
                output += "\n© @TeamUltroid"
                await ult.eor(output)
            elif HELP.get("Addons") and plug in HELP["Addons"]:
                output = f"**Plugin** - `{plug}`\n"
                for i in HELP["Addons"][plug]:
                    output += i
                output += "\n© @TeamUltroid"
                await ult.eor(output)
            elif HELP.get("VCBot") and plug in HELP["VCBot"]:
                output = f"**Plugin** - `{plug}`\n"
                for i in HELP["VCBot"][plug]:
                    output += i
                output += "\n© @TeamUltroid"
                await ult.eor(output)
            else:
                # Not a help entry: maybe it's a bare plugin in LIST,
                # or a command name inside some plugin.
                try:
                    x = get_string("help_11").format(plug)
                    for d in LIST[plug]:
                        x += HNDLR + d
                        x += "\n"
                    x += "\n© @TeamUltroid"
                    await ult.eor(x)
                except BaseException:
                    # KeyError path: search every plugin's commands for the
                    # given name, collecting candidates for a suggestion.
                    file = None
                    compare_strings = []
                    for file_name in LIST:
                        compare_strings.append(file_name)
                        value = LIST[file_name]
                        for j in value:
                            j = cmd_regex_replace(j)
                            compare_strings.append(j)
                            if j.strip() == plug:
                                file = file_name
                                break
                    if not file:
                        # the enter command/plugin name is not found
                        text = f"`{plug}` is not a valid plugin!"
                        best_match = None
                        for _ in compare_strings:
                            if plug in _ and not _.startswith("_"):
                                best_match = _
                                break
                        if best_match:
                            text += f"\nDid you mean `{best_match}`?"
                        return await ult.eor(text)
                    output = f"**Command** `{plug}` **found in plugin** - `{file}`\n"
                    if file in HELP["Official"]:
                        for i in HELP["Official"][file]:
                            output += i
                    elif HELP.get("Addons") and file in HELP["Addons"]:
                        for i in HELP["Addons"][file]:
                            output += i
                    elif HELP.get("VCBot") and file in HELP["VCBot"]:
                        for i in HELP["VCBot"][file]:
                            output += i
                    output += "\n© @TeamUltroid"
                    await ult.eor(output)
        except BaseException as er:
            LOGS.exception(er)
            await ult.eor("Error 🤔 occured.")
    else:
        try:
            # Trigger the assistant's inline help menu.
            results = await ult.client.inline_query(asst.me.username, "ultd")
        except BotMethodInvalidError:
            # Bot accounts can't use inline queries — build the button menu
            # directly instead.
            z = []
            for x in LIST.values():
                z.extend(x)
            cmd = len(z) + 10
            if udB.get_key("MANAGER") and udB.get_key("DUAL_HNDLR") == "/":
                _main_help_menu[2:3] = [[Button.inline("• Manager Help •", "mngbtn")]]
            return await ult.reply(
                get_string("inline_4").format(
                    OWNER_NAME,
                    len(HELP["Official"]),
                    len(HELP["Addons"] if "Addons" in HELP else []),
                    cmd,
                ),
                file=inline_pic(),
                buttons=_main_help_menu,
            )
        except BotResponseTimeoutError:
            return await ult.eor(
                get_string("help_2").format(HNDLR),
            )
        except BotInlineDisabledError:
            return await ult.eor(get_string("help_3"))
        await results[0].click(chat.id, reply_to=ult.reply_to_msg_id, hide_via=True)
        await ult.delete()
6,011 | from . import get_help
import os
import random
from telethon.utils import get_display_name
from urllib.parse import urlencode
from . import Carbon, ultroid_cmd, get_string, inline_mention
from secrets import token_hex
# Load the bundled colour-name list; fall back to an empty list when the
# resource file is missing.
# NOTE(review): _colorspath is not defined in this fragment — presumably
# bound in the stripped module header; confirm.
if os.path.exists(_colorspath):
    with open(_colorspath, "r") as f:
        all_col = f.read().split()
else:
    all_col = []
# NOTE(review): the stray line below looks like a remnant of a stripped
# @ultroid_cmd(pattern=...) decorator; as written it just binds a 1-tuple.
pattern="(rc|c)arbon",
async def cr_bn(event):
    """Render replied media text or the given code via carbon.now.sh.

    A leading 'r' in the trigger ("rcarbon") picks a random background
    colour from all_col; plain "carbon" uses White.
    """
    status = await event.eor(get_string("com_1"))
    trigger = event.pattern_match.group(1)
    bg_colour = random.choice(all_col) if trigger.startswith("r") else "White"
    if event.reply_to_msg_id:
        replied = await event.get_reply_message()
        if not replied.media:
            snippet = replied.message
        else:
            # Replied media: read the downloaded file's text content.
            path = await event.client.download_media(replied)
            with open(path) as fh:
                snippet = fh.read()
            os.remove(path)
    else:
        try:
            snippet = event.text.split(" ", maxsplit=1)[1]
        except IndexError:
            return await status.eor(get_string("carbon_2"))
    result = await Carbon(code=snippet, file_name="ultroid_carbon", backgroundColor=bg_colour)
    if isinstance(result, dict):
        # Carbon returns a dict payload on failure; surface it as-is.
        await status.edit(f"`{result}`")
        return
    await status.delete()
    await event.reply(
        f"Carbonised by {inline_mention(event.sender)}",
        file=result,
    )
6,012 | from . import get_help
import os
import random
from telethon.utils import get_display_name
from urllib.parse import urlencode
from . import Carbon, ultroid_cmd, get_string, inline_mention
from secrets import token_hex
# Load the bundled colour-name list; fall back to an empty list when the
# resource file is missing.
# NOTE(review): _colorspath is not defined in this fragment — presumably
# bound in the stripped module header; confirm.
if os.path.exists(_colorspath):
    with open(_colorspath, "r") as f:
        all_col = f.read().split()
else:
    all_col = []
# NOTE(review): the stray line below looks like a remnant of a stripped
# @ultroid_cmd(pattern=...) decorator; as written it just binds a 1-tuple.
pattern="(rc|c)arbon",
async def crbn(event):
    """Carbonise code with an explicit background colour.

    Usage: with a reply, the argument is the colour and the replied text/file
    is the code; without a reply, the argument is "<colour> <code>".
    """
    match = event.pattern_match.group(1).strip()
    if not match:
        return await event.eor(get_string("carbon_3"))
    msg = await event.eor(get_string("com_1"))
    if event.reply_to_msg_id:
        temp = await event.get_reply_message()
        if temp.media:
            b = await event.client.download_media(temp)
            with open(b) as a:
                code = a.read()
            os.remove(b)
        else:
            code = temp.message
    else:
        try:
            # Split "<colour> <code>" — colour first, rest is the snippet.
            match = match.split(" ", maxsplit=1)
            code = match[1]
            match = match[0]
        except IndexError:
            return await msg.eor(get_string("carbon_2"))
    xx = await Carbon(code=code, backgroundColor=match)
    # Fix: Carbon() returns a dict on API failure; show the error instead of
    # passing a non-file to event.reply (keeps parity with the cr_bn handler).
    if isinstance(xx, dict):
        await msg.edit(f"`{xx}`")
        return
    await msg.delete()
    await event.reply(
        f"Carbonised by {inline_mention(event.sender)}",
        file=xx,
    )
6,013 | from . import get_help
import os
import random
from telethon.utils import get_display_name
from urllib.parse import urlencode
from . import Carbon, ultroid_cmd, get_string, inline_mention
from secrets import token_hex
# Load the bundled colour-name list; fall back to an empty list when the
# resource file is missing.
# NOTE(review): _colorspath is not defined in this fragment — presumably
# bound in the stripped module header; confirm.
if os.path.exists(_colorspath):
    with open(_colorspath, "r") as f:
        all_col = f.read().split()
else:
    all_col = []
# NOTE(review): the stray line below looks like a remnant of a stripped
# @ultroid_cmd(pattern=...) decorator; as written it just binds a 1-tuple.
pattern="(rc|c)arbon",
# Theme names accepted by ray.so's snapshot generator (used by pass_on).
RaySoTheme = [
    "meadow",
    "breeze",
    "raindrop",
    "candy",
    "crimson",
    "falcon",
    "sunset",
    "midnight",
]
async def pass_on(ult):
    """Render text (from arguments or a reply) as a ray.so code snapshot.

    Drives a headless Chromium via Playwright: opens ray.so with the chosen
    theme/dark-mode/title encoded in the URL fragment, types the text, clicks
    export, and sends the downloaded image back.
    """
    try:
        from playwright.async_api import async_playwright
    except ImportError:
        await ult.eor("`playwright` is not installed!\nPlease install it to use this command..")
        return
    proc = await ult.eor(get_string("com_1"))
    spli = ult.text.split()
    theme, dark, title, text = None, True, get_display_name(ult.chat), None
    if len(spli) > 2:
        if spli[1] in RaySoTheme:
            theme = spli[1]
            dark = spli[2].lower().strip() in ["true", "t"]
    elif len(spli) > 1:
        if spli[1] in RaySoTheme:
            theme = spli[1]
        elif spli[1] == "list":
            text = "**List of Rayso Themes:**\n" + "\n".join(
                [f"- `{th_}`" for th_ in RaySoTheme]
            )
            await proc.eor(text)
            return
        else:
            try:
                text = ult.text.split(maxsplit=1)[1]
            except IndexError:
                pass
    if not theme or theme not in RaySoTheme:
        theme = random.choice(RaySoTheme)
    if ult.is_reply:
        msg = await ult.get_reply_message()
        text = msg.message
        title = get_display_name(msg.sender)
    # Fix: without this guard, elem.type(None) below raised when the command
    # carried no text argument and no reply.
    if not text:
        return await proc.eor("`Give some text or reply to a text message!`", time=5)
    name = token_hex(8) + ".png"
    data = {
        "darkMode": dark,
        "theme": theme,
        "title": title
    }
    url = f"https://ray.so/#{urlencode(data)}"
    async with async_playwright() as play:
        chrome = await play.chromium.launch()
        page = await chrome.new_page()
        await page.goto(url)
        await page.wait_for_load_state("networkidle")
        # NOTE(review): these CSS-class selectors are brittle against ray.so
        # frontend updates — confirm they still match.
        elem = await page.query_selector("textarea[class='Editor_textarea__sAyL_']")
        await elem.type(text)
        button = await page.query_selector("button[class='ExportButton_button__d___t']")
        await button.click()
        async with page.expect_download() as dl:
            dled = await dl.value
            await dled.save_as(name)
    await proc.reply(
        file=name
    )
    await proc.try_delete()
    os.remove(name)
6,014 | import os
from . import LOGS, con
try:
import cv2
except ImportError:
LOGS.error(f"{__file__}: OpenCv not Installed.")
import numpy as np
from telegraph import upload_file as upf
from telethon.errors.rpcerrorlist import (
ChatSendMediaForbiddenError,
MessageDeleteForbiddenError,
)
from . import (
Redis,
async_searcher,
download_file,
get_string,
requests,
udB,
ultroid_cmd,
)
async def _(event):
    # NOTE(review): this outer wrapper only defines ult_tools and never calls
    # it — looks like a stripped-decorator artifact; confirm against upstream.
    async def ult_tools(event):
        """Apply the selected OpenCV effect to replied media.

        Supported effects (from the command pattern group): grey, blur,
        negative, danger, mirror, flip, quad, sketch, toon.
        """
        match = event.pattern_match.group(1)
        ureply = await event.get_reply_message()
        if not (ureply and (ureply.media)):
            await event.eor(get_string("cvt_3"))
            return
        ultt = await ureply.download_media()
        xx = await event.eor(get_string("com_1"))
        if ultt.endswith(".tgs"):
            xx = await xx.edit(get_string("sts_9"))
        file = await con.convert(ultt, convert_to="png", outname="ult")
        ult = cv2.imread(file)
        if match == "grey":
            ultroid = cv2.cvtColor(ult, cv2.COLOR_BGR2GRAY)
        elif match == "blur":
            ultroid = cv2.GaussianBlur(ult, (35, 35), 0)
        elif match == "negative":
            ultroid = cv2.bitwise_not(ult)
        elif match == "danger":
            # Deliberate wrong-colourspace round trip for a glitch effect.
            dan = cv2.cvtColor(ult, cv2.COLOR_BGR2RGB)
            ultroid = cv2.cvtColor(dan, cv2.COLOR_HSV2BGR)
        elif match == "mirror":
            ish = cv2.flip(ult, 1)
            ultroid = cv2.hconcat([ult, ish])
        elif match == "flip":
            trn = cv2.flip(ult, 1)
            ish = cv2.rotate(trn, cv2.ROTATE_180)
            ultroid = cv2.vconcat([ult, ish])
        elif match == "quad":
            # Mirror horizontally, then stack a 180°-rotated copy below.
            ult = cv2.imread(file)
            roid = cv2.flip(ult, 1)
            mici = cv2.hconcat([ult, roid])
            fr = cv2.flip(mici, 1)
            trn = cv2.rotate(fr, cv2.ROTATE_180)
            ultroid = cv2.vconcat([mici, trn])
        elif match == "sketch":
            # Classic pencil-sketch: divide grayscale by its inverted blur.
            gray_image = cv2.cvtColor(ult, cv2.COLOR_BGR2GRAY)
            inverted_gray_image = 255 - gray_image
            blurred_img = cv2.GaussianBlur(inverted_gray_image, (21, 21), 0)
            inverted_blurred_img = 255 - blurred_img
            ultroid = cv2.divide(gray_image, inverted_blurred_img, scale=256.0)
        elif match == "toon":
            # Fix: build the (H*W, 3) float32 sample matrix with one
            # vectorized reshape/astype instead of the old per-pixel Python
            # loop (O(H*W) interpreted iterations for the same result).
            samples = ult.reshape((-1, 3)).astype(np.float32)
            _, labels, centers = cv2.kmeans(
                samples,
                12,
                None,
                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
                5,
                cv2.KMEANS_PP_CENTERS,
            )
            centers = np.uint8(centers)
            ish = centers[labels.flatten()]
            ultroid = ish.reshape(ult.shape)
        cv2.imwrite("ult.jpg", ultroid)
        await ureply.reply(
            file="ult.jpg",
            force_document=False,
        )
        await xx.delete()
        os.remove("ult.jpg")
        os.remove(file)
6,015 | import os
from . import LOGS, con
import numpy as np
from telegraph import upload_file as upf
from telethon.errors.rpcerrorlist import (
ChatSendMediaForbiddenError,
MessageDeleteForbiddenError,
)
from . import (
Redis,
async_searcher,
download_file,
get_string,
requests,
udB,
ultroid_cmd,
)
async def sampl(ult):
    """Reply with a 200x100 colour swatch for the given colour name/hex."""
    colour = ult.pattern_match.group(1).strip()
    if not colour:
        await ult.eor("Wrong Color Name/Hex Code specified!")
        return
    # NOTE(review): Image is expected from PIL, imported in the stripped
    # module header — confirm.
    swatch = Image.new("RGB", (200, 100), f"{colour}")
    swatch.save("csample.png")
    caption = f"Colour Sample for `{colour}` !"
    try:
        try:
            # Prefer replacing the command message with the swatch.
            await ult.delete()
            await ult.respond(caption, file="csample.png")
        except MessageDeleteForbiddenError:
            # Can't delete here (e.g. not our message rights): reply instead.
            await ult.reply(caption, file="csample.png")
    except ChatSendMediaForbiddenError:
        await ult.eor("Umm! Sending Media is disabled here!")
6,016 | import os
from . import LOGS, con
import numpy as np
from telegraph import upload_file as upf
from telethon.errors.rpcerrorlist import (
ChatSendMediaForbiddenError,
MessageDeleteForbiddenError,
)
from . import (
Redis,
async_searcher,
download_file,
get_string,
requests,
udB,
ultroid_cmd,
)
async def ultd(event):
    """Blurpify the replied media via nekobot and send it back as a webp."""
    replied = await event.get_reply_message()
    status = await event.eor("`...`")
    if not (replied and replied.media):
        await status.edit(get_string("cvt_3"))
        return
    path = await replied.download_media()
    if path.endswith(".tgs"):
        # Animated stickers can't be converted here.
        await status.edit(get_string("sts_9"))
    png = await con.convert(path, convert_to="png", outname="ult")
    # Host the image on graph.org so nekobot can fetch it by URL.
    uploaded = upf(png)
    link = f"https://graph.org{uploaded[0]}"
    resp = await async_searcher(
        f"https://nekobot.xyz/api/imagegen?type=blurpify&image={link}", re_json=True
    )
    result_url = resp.get("message")
    if not resp["success"]:
        return await status.edit(result_url)
    await download_file(result_url, "ult.png")
    sticker = Image.open("ult.png").convert("RGB")
    sticker.save("ult.webp", "webp")
    await event.client.send_file(
        event.chat_id,
        "ult.webp",
        force_document=False,
        reply_to=event.reply_to_msg_id,
    )
    await status.delete()
    os.remove("ult.png")
    os.remove("ult.webp")
    os.remove(path)
6,017 | import os
from . import LOGS, con
try:
import cv2
except ImportError:
LOGS.error(f"{__file__}: OpenCv not Installed.")
import numpy as np
from telegraph import upload_file as upf
from telethon.errors.rpcerrorlist import (
ChatSendMediaForbiddenError,
MessageDeleteForbiddenError,
)
from . import (
Redis,
async_searcher,
download_file,
get_string,
requests,
udB,
ultroid_cmd,
)
async def ok(event):
    """Add a solid border around a replied photo/sticker.

    Argument syntax: "B,G,R[;width]" — colour defaults to white and the
    border width to 20 pixels.
    """
    hm = await event.get_reply_message()
    if not (hm and (hm.photo or hm.sticker)):
        return await event.eor("`Reply to Sticker or Photo..`")
    col = event.pattern_match.group(1).strip()
    wh = 20
    if not col:
        col = [255, 255, 255]
    else:
        try:
            if ";" in col:
                col_ = col.split(";", maxsplit=1)
                wh = int(col_[1])
                col = col_[0]
            # Fix: keep all three channel values — the old [:2] silently
            # dropped the third channel of a "B,G,R" input (the default is
            # 3-channel). Also avoid shadowing `col` inside the comprehension.
            col = [int(c) for c in col.split(",")[:3]]
        except ValueError:
            return await event.eor("`Not a Valid Input...`")
    okla = await hm.download_media()
    img1 = cv2.imread(okla)
    constant = cv2.copyMakeBorder(img1, wh, wh, wh, wh, cv2.BORDER_CONSTANT, value=col)
    cv2.imwrite("output.png", constant)
    await event.client.send_file(event.chat.id, "output.png")
    os.remove("output.png")
    os.remove(okla)
    await event.delete()
6,018 | import os
from . import LOGS, con
try:
import cv2
except ImportError:
LOGS.error(f"{__file__}: OpenCv not Installed.")
import numpy as np
from telegraph import upload_file as upf
from telethon.errors.rpcerrorlist import (
ChatSendMediaForbiddenError,
MessageDeleteForbiddenError,
)
from . import (
Redis,
async_searcher,
download_file,
get_string,
requests,
udB,
ultroid_cmd,
)
async def pixelator(event):
    """Pixelate a replied photo/sticker.

    Optional numeric argument sets the mosaic grid size (default 50): the
    image is shrunk to grid x grid, then blown back up with nearest-neighbour
    interpolation to produce hard pixel blocks.
    """
    reply_message = await event.get_reply_message()
    if not (reply_message and (reply_message.photo or reply_message.sticker)):
        return await event.eor("`Reply to a photo`")
    hw = 50
    try:
        hw = int(event.pattern_match.group(1).strip())
    # Fix: AttributeError added — group(1) is None when no argument was
    # captured, and None.strip() was escaping the old except tuple.
    except (AttributeError, ValueError, TypeError):
        pass
    if hw < 1:
        # Guard against zero/negative grid sizes, which crash cv2.resize.
        hw = 50
    msg = await event.eor(get_string("com_1"))
    image = await reply_message.download_media()
    input_ = cv2.imread(image)
    height, width = input_.shape[:2]
    w, h = (hw, hw)
    temp = cv2.resize(input_, (w, h), interpolation=cv2.INTER_LINEAR)
    output = cv2.resize(temp, (width, height), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite("output.jpg", output)
    await msg.respond("• Pixelated by Ultroid", file="output.jpg")
    await msg.delete()
    os.remove("output.jpg")
    os.remove(image)
6,019 | from . import get_help
import string
from . import eod, ultroid_cmd
# Unicode glyph tables for gen_font(), each ordered a-z then A-Z so indices
# line up with the module's _default alphabet (presumably ASCII letters —
# defined outside this fragment; confirm).
Fonts = {
    "small caps": "ᴀʙᴄᴅᴇғɢʜɪᴊᴋʟᴍɴᴏᴘϙʀsᴛᴜᴠᴡxʏᴢABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "monospace": "𝚊𝚋𝚌𝚍𝚎𝚏𝚐𝚑𝚒𝚓𝚔𝚕𝚖𝚗𝚘𝚙𝚚𝚛𝚜𝚝𝚞𝚟𝚠𝚡𝚢𝚣𝙰𝙱𝙲𝙳𝙴𝙵𝙶𝙷𝙸𝙹𝙺𝙻𝙼𝙽𝙾𝙿𝚀𝚁𝚂𝚃𝚄𝚅𝚆𝚇𝚈𝚉",
    "double stroke": "𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫𝔸𝔹ℂ𝔻𝔼𝔽𝔾ℍ𝕀𝕁𝕂𝕃𝕄ℕ𝕆ℙℚℝ𝕊𝕋𝕌𝕍𝕎𝕏𝕐ℤ",
    "script royal": "𝒶𝒷𝒸𝒹𝑒𝒻𝑔𝒽𝒾𝒿𝓀𝓁𝓂𝓃𝑜𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵",
}
def gen_font(text, new_font):
    """Replace every character of *text* found in _default with its styled
    glyph at the same index in *new_font*."""
    # Join-then-split explodes the glyph string into single codepoints.
    glyphs = " ".join(new_font).split()
    for original_char in text:
        if original_char in _default:
            styled = glyphs[_default.index(original_char)]
            text = text.replace(original_char, styled)
    return text
async def _(e):
    """Font command: restyle text with a fancy Unicode font.

    Without arguments: list the available fonts. With a reply: the argument
    is the font name and the replied text is restyled. Without a reply: the
    argument is "font : text".
    """
    query = e.pattern_match.group(1).strip()
    reply = await e.get_reply_message()
    if not query:
        menu = "**Available Fonts**\n\n"
        for name in Fonts.keys():
            menu += f"• `{name}`\n"
        return await e.eor(menu, time=5)
    if reply:
        font = query
        text = reply.message
    else:
        try:
            parts = query.split(":", maxsplit=1)
            # [:-1] drops the space before the colon in "font : text".
            font = parts[0][:-1]
            text = parts[1]
        except IndexError:
            # NOTE(review): the original passed the *builtin* `help` here;
            # kept as-is pending the intended usage string.
            return await eod(e, help)
    # Fix: removed the unreachable `elif not input:` branch — empty input
    # already returned the font menu above. Also renamed the local that
    # shadowed the `input` builtin.
    if font not in Fonts.keys():
        return await e.eor(f"`{font} not in font list`.", time=5)
    msg = gen_font(text, Fonts[font])
    await e.eor(msg)
6,020 | from . import get_help
from pyUltroid.dB.blacklist_db import (
add_blacklist,
get_blacklist,
list_blacklist,
rem_blacklist,
)
from . import events, get_string, udB, ultroid_bot, ultroid_cmd
# NOTE(review): the nesting below (a handler containing a DB helper
# containing another handler) never invokes its inner defs and looks like a
# concatenation artifact of three separate definitions — confirm against the
# original plugin before relying on it.
async def blacklist(e):
    def add_blacklist(chat, word):
        async def af(e):
            """Add the given word(s) to this chat's blacklist."""
            wrd = e.pattern_match.group(1).strip()
            chat = e.chat_id
            if not (wrd):
                return await e.eor(get_string("blk_1"), time=5)
            # Re-read past the command prefix (11 chars) to keep spacing.
            wrd = e.text[11:]
            heh = wrd.split(" ")
            for z in heh:
                add_blacklist(int(chat), z.lower())
            # Register the live blacklist watcher for incoming messages.
            ultroid_bot.add_handler(blacklist, events.NewMessage(incoming=True))
            await e.eor(get_string("blk_2").format(wrd))
6,021 | from . import get_help
from pyUltroid.dB.blacklist_db import (
add_blacklist,
get_blacklist,
list_blacklist,
rem_blacklist,
)
from . import events, get_string, udB, ultroid_bot, ultroid_cmd
def rem_blacklist(chat, word):
    """Drop *word* from *chat*'s blacklist and persist; no-op when absent."""
    data = get_stuff()
    entries = data.get(chat)
    if entries and word in entries:
        entries.remove(word)
        return udB.set_key("BLACKLIST_DB", data)
async def rf(e):
    """Remove one or more words from this chat's blacklist."""
    word_arg = e.pattern_match.group(1).strip()
    chat = e.chat_id
    if not word_arg:
        return await e.eor(get_string("blk_3"), time=5)
    # Re-read past the command prefix (14 chars) to keep original spacing.
    word_arg = e.text[14:]
    for word in word_arg.split(" "):
        rem_blacklist(int(chat), word.lower())
    await e.eor(get_string("blk_4").format(word_arg))
6,022 | from . import get_help
from pyUltroid.dB.blacklist_db import (
add_blacklist,
get_blacklist,
list_blacklist,
rem_blacklist,
)
from . import events, get_string, udB, ultroid_bot, ultroid_cmd
def list_blacklist(chat):
    """Return *chat*'s blacklist as a markdown bullet list, or None if empty."""
    data = get_stuff()
    entries = data.get(chat)
    if entries:
        text = "".join(f"👉`{word}`\n" for word in entries)
        if text:
            return text
async def lsnote(e):
    """Show every blacklisted word for the current chat."""
    listing = list_blacklist(e.chat_id)
    if listing:
        header = get_string("blk_5")
        return await e.eor(header + listing)
    await e.eor(get_string("blk_6"))
6,023 | import glob
import io
import os
import random
from os import remove
from telethon.errors import PeerIdInvalidError, YouBlockedUserError
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputPeerSelf,
)
from telethon.utils import get_input_document
from . import (
KANGING_STR,
LOGS,
asst,
async_searcher,
bash,
con,
functions,
get_string,
inline_mention,
mediainfo,
quotly,
types,
udB,
ultroid_cmd,
)
async def pack_kangish(_):
    """Clone a sticker pack from a replied sticker, or build a new pack from
    a local directory of .tgs/.webm files, owned by the command sender and
    created through the assistant bot."""
    _e = await _.get_reply_message()
    local = None
    try:
        cmdtext = _.text.split(maxsplit=1)[1]
    except IndexError:
        cmdtext = None
    # An existing directory argument switches to local-files mode.
    if cmdtext and os.path.isdir(cmdtext):
        local = True
    elif not (_e and _e.sticker and _e.file.mime_type == "image/webp"):
        return await _.eor(get_string("sts_4"))
    msg = await _.eor(get_string("com_1"))
    _packname = cmdtext or f"Ultroid Kang Pack By {_.sender_id}"
    typee = None
    if not local:
        # NOTE(review): assumes the sticker-set attribute sits at index 1 of
        # the document attributes — confirm against Telethon's ordering.
        _id = _e.media.document.attributes[1].stickerset.id
        _hash = _e.media.document.attributes[1].stickerset.access_hash
        _get_stiks = await _.client(
            functions.messages.GetStickerSetRequest(
                stickerset=types.InputStickerSetID(id=_id, access_hash=_hash), hash=0
            )
        )
        docs = _get_stiks.documents
    else:
        docs = []
        files = glob.glob(cmdtext + "/*")
        # Pack type (animated/video) is inferred from the last file found.
        exte = files[-1]
        if exte.endswith(".tgs"):
            typee = "anim"
        elif exte.endswith(".webm"):
            typee = "vid"
        count = 0
        for file in files:
            if file.endswith((".tgs", ".webm")):
                count += 1
                upl = await asst.upload_file(file)
                docs.append(await asst(UploadMediaRequest(InputPeerSelf(), upl)))
                # Progress ping every five uploads.
                if count % 5 == 0:
                    await msg.edit(f"`Uploaded {count} files.`")
    stiks = []
    for i in docs:
        x = get_input_document(i)
        stiks.append(
            types.InputStickerSetItem(
                document=x,
                # Local files carry no emoji metadata: pick one at random.
                emoji=random.choice(["😐", "👍", "😂"])
                if local
                else (i.attributes[1]).alt,
            )
        )
    try:
        short_name = "ult_" + _packname.replace(" ", "_") + str(_.id)
        _r_e_s = await asst(
            functions.stickers.CreateStickerSetRequest(
                user_id=_.sender_id,
                title=_packname,
                short_name=f"{short_name}_by_{asst.me.username}",
                animated=typee == "anim",
                videos=typee == "vid",
                stickers=stiks,
            )
        )
    except PeerIdInvalidError:
        # The assistant can't act for the user until they /start it.
        return await msg.eor(
            f"Hey {inline_mention(_.sender)} send `/start` to @{asst.me.username} and later try this command again.."
        )
    except BaseException as er:
        LOGS.exception(er)
        return await msg.eor(str(er))
    await msg.eor(
        get_string("sts_5").format(f"https://t.me/addstickers/{_r_e_s.set.short_name}"),
    )
6,024 | import glob
import io
import os
import random
from os import remove
try:
import cv2
except ImportError:
cv2 = None
from telethon.errors import PeerIdInvalidError, YouBlockedUserError
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputPeerSelf,
)
from telethon.utils import get_input_document
from . import (
KANGING_STR,
LOGS,
asst,
async_searcher,
bash,
con,
functions,
get_string,
inline_mention,
mediainfo,
quotly,
types,
udB,
ultroid_cmd,
)
async def hehe(args):
    """Kang the replied sticker/photo/video/text into the user's sticker pack.

    Converts the replied media to an appropriate sticker format, then drives
    a conversation with @Stickers to append it to pack "ult_<uid>_<n>",
    creating a new (static/animated/video) pack when needed.
    """
    ultroid_bot = args.client
    xx = await args.eor(get_string("com_1"))
    user = ultroid_bot.me
    username = user.username
    username = f"@{username}" if username else user.first_name
    message = await args.get_reply_message()
    photo = None
    is_anim, is_vid = False, False
    emoji = None
    if not message:
        return await xx.eor(get_string("sts_6"))
    if message.photo:
        photo = io.BytesIO()
        photo = await ultroid_bot.download_media(message.photo, photo)
    elif message.file and "image" in message.file.mime_type.split("/"):
        photo = io.BytesIO()
        await ultroid_bot.download_file(message.media.document, photo)
        if (
            DocumentAttributeFilename(file_name="sticker.webp")
            in message.media.document.attributes
        ):
            # NOTE(review): assumes the sticker attribute sits at index 1 —
            # confirm against Telethon's attribute ordering.
            emoji = message.media.document.attributes[1].alt
    elif message.file and "video" in message.file.mime_type.split("/"):
        xy = await message.download_media()
        if (message.file.duration or 0) <= 10:
            # Short enough for a Telegram video sticker.
            is_vid = True
            photo = await con.create_webm(xy)
        else:
            # Too long: grab the first frame and treat it as a static image.
            y = cv2.VideoCapture(xy)
            heh, lol = y.read()
            cv2.imwrite("ult.webp", lol)
            photo = "ult.webp"
    elif message.file and "tgsticker" in message.file.mime_type:
        await ultroid_bot.download_file(
            message.media.document,
            "AnimatedSticker.tgs",
        )
        attributes = message.media.document.attributes
        for attribute in attributes:
            if isinstance(attribute, DocumentAttributeSticker):
                emoji = attribute.alt
        is_anim = True
        photo = 1
    elif message.message:
        # Plain text: render it as a quote image.
        photo = await quotly.create_quotly(message)
    else:
        return await xx.edit(get_string("com_4"))
    if not udB.get_key("language") or udB.get_key("language") == "en":
        ra = random.choice(KANGING_STR)
    else:
        ra = get_string("sts_11")
    await xx.edit(f"`{ra}`")
    if photo:
        # Optional args: "<emoji> <pack_no>" or a single emoji / pack number.
        splat = args.text.split()
        pack = 1
        if not emoji:
            emoji = "🏵"
        if len(splat) == 3:
            pack = splat[2]  # User sent ultroid_both
            emoji = splat[1]
        elif len(splat) == 2:
            if splat[1].isnumeric():
                pack = int(splat[1])
            else:
                emoji = splat[1]
        packname = f"ult_{user.id}_{pack}"
        packnick = f"{username}'s Pack {pack}"
        cmd = "/newpack"
        file = io.BytesIO()
        if is_vid:
            packname += "_vid"
            packnick += " (Video)"
            cmd = "/newvideo"
        elif is_anim:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = "/newanimated"
        else:
            image = con.resize_photo_sticker(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        # Probe the public add-stickers page to see whether the pack exists.
        response = await async_searcher(f"http://t.me/addstickers/{packname}")
        htmlstr = response.split("\n")
        if (
            " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>."
            not in htmlstr
        ):
            # Pack exists: append via /addsticker.
            async with ultroid_bot.conversation("@Stickers") as conv:
                try:
                    await conv.send_message("/addsticker")
                except YouBlockedUserError:
                    LOGS.info("Unblocking @Stickers for kang...")
                    await ultroid_bot(functions.contacts.UnblockRequest("stickers"))
                    await conv.send_message("/addsticker")
                await conv.get_response()
                await conv.send_message(packname)
                x = await conv.get_response()
                if x.text.startswith("Alright! Now send me the video sticker."):
                    await conv.send_file(photo, force_document=True)
                    x = await conv.get_response()
                # Pack-full limits: 50 for animated/video, 120 for static.
                t = "50" if (is_anim or is_vid) else "120"
                while t in x.message:
                    # Current pack is full — advance to the next pack number
                    # until one with room (or none yet) is selected.
                    pack += 1
                    packname = f"ult_{user.id}_{pack}"
                    packnick = f"{username}'s Pack {pack}"
                    if is_anim:
                        packname += "_anim"
                        packnick += " (Animated)"
                    elif is_vid:
                        packnick += " (Video)"
                        packname += "_vid"
                    await xx.edit(get_string("sts_13").format(pack))
                    await conv.send_message("/addsticker")
                    await conv.get_response()
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text.startswith("Alright! Now send me the video sticker."):
                        await conv.send_file(photo, force_document=True)
                        x = await conv.get_response()
                    if x.text in ["Invalid pack selected.", "Invalid set selected."]:
                        # Next pack doesn't exist yet: create it inline.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        await conv.send_message(packnick)
                        await conv.get_response()
                        if is_anim:
                            await conv.send_file("AnimatedSticker.tgs")
                            remove("AnimatedSticker.tgs")
                        else:
                            if is_vid:
                                file = photo
                            else:
                                file.seek(0)
                            await conv.send_file(file, force_document=True)
                        await conv.get_response()
                        await conv.send_message(emoji)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        await conv.get_response()
                        await conv.send_message("/skip")
                        await conv.get_response()
                        await conv.send_message(packname)
                        await conv.get_response()
                        await xx.edit(
                            get_string("sts_7").format(packname),
                            parse_mode="md",
                        )
                        return
                if is_anim:
                    await conv.send_file("AnimatedSticker.tgs")
                    remove("AnimatedSticker.tgs")
                elif "send me an emoji" not in x.message:
                    if is_vid:
                        file = photo
                    else:
                        file.seek(0)
                    await conv.send_file(file, force_document=True)
                    rsp = await conv.get_response()
                    if "Sorry, the file type is invalid." in rsp.text:
                        await xx.edit(
                            get_string("sts_8"),
                        )
                        return
                await conv.send_message(emoji)
                await conv.get_response()
                await conv.send_message("/done")
                await conv.get_response()
                await ultroid_bot.send_read_acknowledge(conv.chat_id)
        else:
            # No pack yet: create the first one from scratch.
            await xx.edit("`Brewing a new Pack...`")
            async with ultroid_bot.conversation("Stickers") as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                await conv.send_message(packnick)
                await conv.get_response()
                if is_anim:
                    await conv.send_file("AnimatedSticker.tgs")
                    remove("AnimatedSticker.tgs")
                else:
                    if is_vid:
                        file = photo
                    else:
                        file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await xx.edit(
                        get_string("sts_8"),
                    )
                    return
                await conv.send_message(emoji)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                await conv.get_response()
                await conv.send_message("/skip")
                await conv.get_response()
                await conv.send_message(packname)
                await conv.get_response()
                await ultroid_bot.send_read_acknowledge(conv.chat_id)
        await xx.edit(
            get_string("sts_12").format(emoji, packname),
            parse_mode="md",
        )
    try:
        os.remove(photo)
    except BaseException:
        pass
6,025 | import glob
import io
import os
import random
from os import remove
from telethon.errors import PeerIdInvalidError, YouBlockedUserError
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputPeerSelf,
)
from telethon.utils import get_input_document
from . import (
KANGING_STR,
LOGS,
asst,
async_searcher,
bash,
con,
functions,
get_string,
inline_mention,
mediainfo,
quotly,
types,
udB,
ultroid_cmd,
)
async def ultdround(event):
    """Crop the replied image into a circle and send it as a webp sticker."""
    replied = await event.get_reply_message()
    status = await event.eor(get_string("com_1"))
    if not (replied and replied.media):
        await status.edit(get_string("sts_10"))
        return
    path = await replied.download_media()
    png = await con.convert(
        path,
        convert_to="png",
        allowed_formats=["jpg", "jpeg", "png"],
        outname="round",
        remove_old=True,
    )
    base = Image.open(png).convert("RGB")
    pixels = np.array(base)
    # PIL .size is (width, height); the two values are only reused as the
    # pieslice bounding box, so the name swap is harmless here.
    h, w = base.size
    # Draw a full-circle alpha mask and stack it as a fourth channel.
    mask = Image.new("L", base.size, 0)
    painter = ImageDraw.Draw(mask)
    painter.pieslice([0, 0, h, w], 0, 360, fill=255)
    alpha_channel = np.array(mask)
    stacked = np.dstack((pixels, alpha_channel))
    Image.fromarray(stacked).save("ult.webp")
    await event.client.send_file(
        event.chat_id,
        "ult.webp",
        force_document=False,
        reply_to=event.reply_to_msg_id,
    )
    await status.delete()
    os.remove(png)
    os.remove("ult.webp")
6,026 | import glob
import io
import os
import random
from os import remove
from telethon.errors import PeerIdInvalidError, YouBlockedUserError
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputPeerSelf,
)
from telethon.utils import get_input_document
from . import (
KANGING_STR,
LOGS,
asst,
async_searcher,
bash,
con,
functions,
get_string,
inline_mention,
mediainfo,
quotly,
types,
udB,
ultroid_cmd,
)
async def ultdestroy(event):
    """'Destroy' a replied animated sticker by exaggerating its keyframes.

    Converts the .tgs to lottie JSON, blindly scales bracketed numeric values
    via string substitution, rebuilds the .tgs, and sends it back.
    """
    ult = await event.get_reply_message()
    if not (ult and ult.media and "animated" in mediainfo(ult.media)):
        return await event.eor(get_string("sts_2"))
    await event.client.download_media(ult, "ultroid.tgs")
    xx = await event.eor(get_string("com_1"))
    await bash("lottie_convert.py ultroid.tgs json.json")
    # Fix: read/write through context managers — the original wrote via a
    # bare open(...).write(...) (handle never closed) and shadowed the name
    # `json` with the file handle.
    with open("json.json") as fh:
        jsn = fh.read()
    # Blind substitutions that warp the animation's bracketed values.
    jsn = (
        jsn.replace("[100]", "[200]")
        .replace("[10]", "[40]")
        .replace("[-1]", "[-10]")
        .replace("[0]", "[15]")
        .replace("[1]", "[20]")
        .replace("[2]", "[17]")
        .replace("[3]", "[40]")
        .replace("[4]", "[37]")
        .replace("[5]", "[60]")
        .replace("[6]", "[70]")
        .replace("[7]", "[40]")
        .replace("[8]", "[37]")
        .replace("[9]", "[110]")
    )
    with open("json.json", "w") as fh:
        fh.write(jsn)
    file = await con.animated_sticker("json.json", "ultroid.tgs")
    if file:
        await event.client.send_file(
            event.chat_id,
            file="ultroid.tgs",
            force_document=False,
            reply_to=event.reply_to_msg_id,
        )
    await xx.delete()
    os.remove("json.json")
6,027 | import glob
import io
import os
import random
from os import remove
try:
import cv2
except ImportError:
cv2 = None
from telethon.errors import PeerIdInvalidError, YouBlockedUserError
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputPeerSelf,
)
from telethon.utils import get_input_document
from . import (
KANGING_STR,
LOGS,
asst,
async_searcher,
bash,
con,
functions,
get_string,
inline_mention,
mediainfo,
quotly,
types,
udB,
ultroid_cmd,
)
def _paste_on_ultroid_template(img, template):
    """Resize `img` to roughly 200px (biased by aspect ratio), paste it onto a
    copy of `template` at (150, 0), and write the result to "o.webp".

    Returns the output path ("o.webp").
    """
    width, height = img.size
    if width == height:
        new_w = new_h = 200
    else:
        # Bias each side of the 200px target by up to +/-250px proportionally
        # to how far the aspect ratio deviates from square.
        total = width + height
        w_bias = (width / total * 100) - 50
        h_bias = (height / total * 100) - 50
        new_w = 200 + 5 * w_bias
        new_h = 200 + 5 * h_bias
    shrunk = img.resize((int(new_w), int(new_h)))
    shrunk.save("k.png", format="PNG", optimize=True)
    pasted = template.copy()
    pasted.paste(Image.open("k.png"), (150, 0))
    pasted.save("o.webp", "WEBP", quality=95)
    os.remove("k.png")
    return "o.webp"


async def ultiny(event):
    """Shrink the replied media and paste it onto the blank Ultroid sticker template.

    Handles animated stickers (.tgs) by enlarging the lottie canvas, videos/gifs
    by grabbing their first frame, and still images directly.
    """
    reply = await event.get_reply_message()
    if not (reply and (reply.media)):
        await event.eor(get_string("sts_10"))
        return
    xx = await event.eor(get_string("com_1"))
    ik = await reply.download_media()
    # NOTE(review): `Image` (PIL) is not imported in this file's visible import
    # block — confirm it is brought in elsewhere.
    im1 = Image.open("resources/extras/ultroid_blank.png")
    if ik.endswith(".tgs"):
        # Animated sticker: upscale the lottie canvas from 512 to 2000 instead
        # of pasting onto the template.
        await con.animated_sticker(ik, "json.json")
        with open("json.json") as json_file:
            jsn = json_file.read()
        # FIX: was `open("json.json", "w").write(...)` which leaked the handle
        with open("json.json", "w") as json_file:
            json_file.write(jsn.replace("512", "2000"))
        await con.animated_sticker("json.json", "ult.tgs")
        file = "ult.tgs"
        os.remove("json.json")
    elif ik.endswith((".gif", ".webm", ".mp4")):
        # Grab the first frame of the video/gif, then treat it as a still image.
        capture = cv2.VideoCapture(ik)
        _, frame = capture.read()
        cv2.imwrite("i.png", frame)
        file = _paste_on_ultroid_template(Image.open("i.png"), im1)
        os.remove("i.png")
    else:
        # Still image: shared resize/paste pipeline (was duplicated inline).
        file = _paste_on_ultroid_template(Image.open(ik), im1)
    if os.path.exists(file):
        await event.client.send_file(
            event.chat_id, file, reply_to=event.reply_to_msg_id
        )
        os.remove(file)
    await xx.delete()
    os.remove(ik)
6,028 | from telethon.tl.types import InputMediaPoll, Poll, PollAnswer
from . import get_string, ultroid_cmd
async def uri_poll(e):
    """Create a poll from `question; opt1; opt2 | type[_answer]`.

    Supported poll types: public, quiz, multiple.  For quiz polls the 1-based
    correct option may be appended, e.g. `| quiz_2`.
    """
    if not e.client._bot and e.is_private:
        return await e.eor("`Use this in Group/Channel.`", time=15)
    match = e.pattern_match.group(1).strip()
    if not match:
        return await e.eor("`Give Proper Input...`", time=5)
    if ";" not in match:
        return await e.eor("`Unable to Determine Options.`.", time=5)
    ques = match.split(";")[0]
    option = match.split(";")[1:]
    publ = None
    quizo = None
    karzo = None
    mpp = None
    if "|" in match:
        # FIX: poll type was parsed with match.split(" | ") (spaces required)
        # while the options used match.split("|"), so `...|quiz` without
        # surrounding spaces raised IndexError.  Split on "|" and strip.
        ptype = match.split("|")[1].strip()
        option = match.split("|")[0].split(";")[1:]
        if "_" in ptype:
            # quiz_N -> zero-based index of the correct answer, as bytes
            karzo = [str(int(ptype.split("_")[1]) - 1).encode()]
            ptype = ptype.split("_")[0]
        if ptype not in ["public", "quiz", "multiple"]:
            return await e.eor("`Invalid Poll Type...`", time=5)
        if ptype == "multiple":
            mpp = True
        elif ptype == "public":
            publ = True
        elif ptype == "quiz":
            quizo = True
    if len(option) <= 1:
        return await e.eor("`Options Should be More than 1..`", time=5)
    m = await e.eor(get_string("com_1"))
    # each answer's payload is its option index encoded as bytes
    OUT = [PollAnswer(option[on], str(on).encode()) for on in range(len(option))]
    await e.respond(
        file=InputMediaPoll(
            Poll(20, ques, OUT, multiple_choice=mpp, public_voters=publ, quiz=quizo),
            correct_answers=karzo,
        ),
    )
    await m.delete()
6,029 | import time
import numpy as np
import sys
import random
import os
import warnings
import torch
from torch.utils.tensorboard import SummaryWriter
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.utils.data.distributed
from tqdm import tqdm
from data_process.kitti_dataloader import create_train_dataloader, create_val_dataloader
from models.model_utils import create_model, make_data_parallel, get_num_parameters
from utils.train_utils import create_optimizer, create_lr_scheduler, get_saved_state, save_checkpoint
from utils.train_utils import reduce_tensor, to_python_float, get_tensorboard_log
from utils.misc import AverageMeter, ProgressMeter
from utils.logger import Logger
from config.train_config import parse_train_configs
from evaluate import evaluate_mAP
def cleanup():
    """Destroy the torch.distributed process group (call once DDP training ends)."""
    dist.destroy_process_group()
def train_one_epoch(train_dataloader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer):
    """Run one training epoch.

    Accumulates gradients over `configs.subdivisions` batches before each
    optimizer step, optionally steps the LR scheduler per iteration, and
    reports loss/timing to `logger` and `tb_writer` (both may be None on
    non-master ranks).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_dataloader), [batch_time, data_time, losses],
                             prefix="Train - Epoch: [{}/{}]".format(epoch, configs.num_epochs))
    num_iters_per_epoch = len(train_dataloader)
    # switch to train mode
    model.train()
    start_time = time.time()
    for batch_idx, batch_data in enumerate(tqdm(train_dataloader)):
        data_time.update(time.time() - start_time)
        _, imgs, targets = batch_data
        # global iteration counter across epochs (epoch is 1-based)
        global_step = num_iters_per_epoch * (epoch - 1) + batch_idx + 1
        batch_size = imgs.size(0)
        targets = targets.to(configs.device, non_blocking=True)
        imgs = imgs.to(configs.device, non_blocking=True)
        total_loss, outputs = model(imgs, targets)
        # For torch.nn.DataParallel case
        if (not configs.distributed) and (configs.gpu_idx is None):
            total_loss = torch.mean(total_loss)
        # compute gradient and perform backpropagation
        total_loss.backward()
        # gradient accumulation: only step/zero every `subdivisions` iterations
        if global_step % configs.subdivisions == 0:
            optimizer.step()
            # Adjust learning rate
            if configs.step_lr_in_epoch:
                lr_scheduler.step()
                if tb_writer is not None:
                    # NOTE(review): get_lr() is deprecated in newer torch in
                    # favor of get_last_lr() — confirm the targeted version.
                    tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], global_step)
            # zero the parameter gradients
            optimizer.zero_grad()
        if configs.distributed:
            # average the loss across ranks (for logging only)
            reduced_loss = reduce_tensor(total_loss.data, configs.world_size)
        else:
            reduced_loss = total_loss.data
        losses.update(to_python_float(reduced_loss), batch_size)
        # measure elapsed time
        # torch.cuda.synchronize()
        batch_time.update(time.time() - start_time)
        if tb_writer is not None:
            if (global_step % configs.tensorboard_freq) == 0:
                tensorboard_log = get_tensorboard_log(model)
                tb_writer.add_scalar('avg_loss', losses.avg, global_step)
                for layer_name, layer_dict in tensorboard_log.items():
                    tb_writer.add_scalars(layer_name, layer_dict, global_step)
        # Log message
        if logger is not None:
            if (global_step % configs.print_freq) == 0:
                logger.info(progress.get_message(batch_idx))
        start_time = time.time()
def create_train_dataloader(configs):
    """Create dataloader for training

    Builds the KITTI training set with random rotation/scaling lidar-space
    transforms and flip/cutout BEV-space augmentations, then wraps it in a
    DataLoader (with a DistributedSampler when running distributed).

    Returns:
        (train_dataloader, train_sampler) — sampler is None unless distributed.
    """
    # NOTE(review): a symbol with this same name is imported near the top of
    # the file — confirm which definition callers are meant to use.
    # One of rotation/scaling is applied with overall probability 0.66.
    train_lidar_transforms = OneOf([
        Random_Rotation(limit_angle=20., p=1.0),
        Random_Scaling(scaling_range=(0.95, 1.05), p=1.0)
    ], p=0.66)
    train_aug_transforms = Compose([
        Horizontal_Flip(p=configs.hflip_prob),
        Cutout(n_holes=configs.cutout_nholes, ratio=configs.cutout_ratio, fill_value=configs.cutout_fill_value,
               p=configs.cutout_prob)
    ], p=1.)
    train_dataset = KittiDataset(configs.dataset_dir, mode='train', lidar_transforms=train_lidar_transforms,
                                 aug_transforms=train_aug_transforms, multiscale=configs.multiscale_training,
                                 num_samples=configs.num_samples, mosaic=configs.mosaic,
                                 random_padding=configs.random_padding)
    train_sampler = None
    if configs.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    # shuffle must be off when a sampler is supplied (DistributedSampler shuffles)
    train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),
                                  pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler,
                                  collate_fn=train_dataset.collate_fn)
    return train_dataloader, train_sampler
def create_val_dataloader(configs):
    """Build the validation DataLoader: no augmentation, fixed size, fixed order."""
    dataset = KittiDataset(configs.dataset_dir, mode='val', lidar_transforms=None, aug_transforms=None,
                           multiscale=False, num_samples=configs.num_samples, mosaic=False, random_padding=False)
    # Distributed runs shard the (unshuffled) validation set across ranks.
    sampler = None
    if configs.distributed:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)
    return DataLoader(dataset, batch_size=configs.batch_size, shuffle=False,
                      pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=sampler,
                      collate_fn=dataset.collate_fn)
def create_model(configs):
    """Create model based on architecture name.

    Args:
        configs: parsed config namespace; reads `arch`, `cfgfile`, `use_giou_loss`.

    Returns:
        The instantiated (not yet device-placed) model.

    Raises:
        ValueError: if `configs.arch` is not a supported backbone.
    """
    if (configs.arch == 'darknet') and (configs.cfgfile is not None):
        print('using darknet')
        model = Darknet(cfgfile=configs.cfgfile, use_giou_loss=configs.use_giou_loss)
    else:
        # FIX: was `assert False, ...` — asserts are stripped under `python -O`,
        # which would fall through and raise UnboundLocalError on `model` instead.
        raise ValueError('Undefined model backbone')
    return model
def get_num_parameters(model):
    """Return the number of trainable (requires_grad) parameters of `model`.

    Works for both plain modules and DataParallel/DDP wrappers (which expose
    the real network via `.module`).
    """
    network = model.module if hasattr(model, 'module') else model
    return sum(p.numel() for p in network.parameters() if p.requires_grad)
def make_data_parallel(model, configs):
    """Wrap `model` for multi-GPU execution according to `configs`.

    Distributed: DistributedDataParallel, pinned to a single GPU when
    `configs.gpu_idx` is set (batch size and workers are divided per GPU).
    Non-distributed: single GPU if `gpu_idx` is set, otherwise DataParallel
    over all visible GPUs.  Requires CUDA in every branch.
    """
    if configs.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if configs.gpu_idx is not None:
            torch.cuda.set_device(configs.gpu_idx)
            model.cuda(configs.gpu_idx)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            configs.batch_size = int(configs.batch_size / configs.ngpus_per_node)
            # ceiling division so every process gets at least one worker
            configs.num_workers = int((configs.num_workers + configs.ngpus_per_node - 1) / configs.ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[configs.gpu_idx])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif configs.gpu_idx is not None:
        torch.cuda.set_device(configs.gpu_idx)
        model = model.cuda(configs.gpu_idx)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        model = torch.nn.DataParallel(model).cuda()
    return model
def create_optimizer(configs, model):
    """Create optimizer for training process

    Parameters are split into three groups: conv weights (with weight decay),
    biases (no decay), and everything else (base group).
    Refer from https://github.com/ultralytics/yolov3/blob/e80cc2b80e3fd46395e8ec75f843960100927ff2/train.py#L94
    """
    network = model.module if hasattr(model, 'module') else model
    pg0, pg1, pg2 = [], [], []  # base / decayed conv weights / biases
    for name, param in dict(network.named_parameters()).items():
        if '.bias' in name:
            pg2.append(param)  # biases
        elif ('conv' in name) and ('.weight' in name):
            pg1.append(param)  # apply weight_decay
        else:
            pg0.append(param)  # all else
    if configs.optimizer_type == 'sgd':
        optimizer = torch.optim.SGD(pg0, lr=configs.lr, momentum=configs.momentum, nesterov=True)
    elif configs.optimizer_type == 'adam':
        optimizer = torch.optim.Adam(pg0, lr=configs.lr)
    else:
        assert False, "Unknown optimizer type"
    # conv weights get weight decay; biases get none
    optimizer.add_param_group({'params': pg1, 'weight_decay': configs.weight_decay})
    optimizer.add_param_group({'params': pg2})
    print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    return optimizer
def create_lr_scheduler(optimizer, configs):
    """Create learning rate scheduler for training process

    'multi_step': quartic burn-in warmup, then decay x0.1 / x0.01 at the two
    configured steps.  'cosin': cosine annealing from 1.0 down to 0.1 over
    `num_epochs` (https://arxiv.org/pdf/1812.01187.pdf).
    """
    if configs.lr_type == 'multi_step':
        def warmup_then_decay(step):
            if step < configs.burn_in:
                return (step / configs.burn_in) ** 4
            if step < configs.steps[0]:
                return 1.0
            if step < configs.steps[1]:
                return 0.1
            return 0.01

        return LambdaLR(optimizer, warmup_then_decay)
    if configs.lr_type == 'cosin':
        def cosine_factor(x):
            return (((1 + math.cos(x * math.pi / configs.num_epochs)) / 2) ** 1.0) * 0.9 + 0.1

        return LambdaLR(optimizer, lr_lambda=cosine_factor)
    raise ValueError
def get_saved_state(model, optimizer, lr_scheduler, epoch, configs):
    """Get the information to save with checkpoints

    Returns a pair: the (unwrapped) model state dict, and a utils dict holding
    the epoch, configs, and deep copies of the optimizer/scheduler state.
    """
    network = model.module if hasattr(model, 'module') else model
    utils_state_dict = {
        'epoch': epoch,
        'configs': configs,
        # deep copies so later training steps cannot mutate the saved state
        'optimizer': copy.deepcopy(optimizer.state_dict()),
        'lr_scheduler': copy.deepcopy(lr_scheduler.state_dict()),
    }
    return network.state_dict(), utils_state_dict
def save_checkpoint(checkpoints_dir, saved_fn, model_state_dict, utils_state_dict, epoch):
    """Save checkpoint every epoch only is best model or after every checkpoint_freq epoch

    Writes two files per epoch: Model_<fn>_epoch_<e>.pth (weights) and
    Utils_<fn>_epoch_<e>.pth (optimizer/scheduler/epoch state).
    """
    model_save_path = os.path.join(checkpoints_dir, 'Model_{}_epoch_{}.pth'.format(saved_fn, epoch))
    utils_save_path = os.path.join(checkpoints_dir, 'Utils_{}_epoch_{}.pth'.format(saved_fn, epoch))
    for path, payload in ((model_save_path, model_state_dict), (utils_save_path, utils_state_dict)):
        torch.save(payload, path)
    print('save a checkpoint at {}'.format(model_save_path))
class Logger():
    """
    Create logger to save logs during training
    Args:
        logs_dir: directory where logger_<saved_fn>.txt is written
        saved_fn: run name used in the log file name
    Returns:

    """

    def __init__(self, logs_dir, saved_fn):
        log_path = os.path.join(logs_dir, 'logger_{}.txt'.format(saved_fn))
        # formatter = logging.Formatter('%(asctime)s:File %(module)s.py:Func %(funcName)s:Line %(lineno)d:%(levelname)s: %(message)s')
        fmt = logging.Formatter(
            '%(asctime)s: %(module)s.py - %(funcName)s(), at Line %(lineno)d:%(levelname)s:\n%(message)s')
        # NOTE(review): getLogger(__name__) is shared — constructing two Logger
        # instances accumulates handlers (duplicate lines); confirm single use.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        to_file = logging.FileHandler(log_path)
        to_file.setLevel(logging.INFO)
        to_file.setFormatter(fmt)
        to_console = logging.StreamHandler()
        to_console.setFormatter(fmt)
        for handler in (to_file, to_console):
            self.logger.addHandler(handler)

    def info(self, message):
        self.logger.info(message)
def evaluate_mAP(val_loader, model, configs, logger):
    """Run inference over the validation set and compute detection metrics.

    Returns:
        (precision, recall, AP, f1, ap_class) arrays as produced by ap_per_class.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    progress = ProgressMeter(len(val_loader), [batch_time, data_time],
                             prefix="Evaluation phase...")
    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        start_time = time.time()
        for batch_idx, batch_data in enumerate(tqdm(val_loader)):
            data_time.update(time.time() - start_time)
            _, imgs, targets = batch_data
            # Extract labels
            labels += targets[:, 1].tolist()
            # Rescale x, y, w, h of targets ((box_idx, class, x, y, w, l, im, re))
            targets[:, 2:6] *= configs.img_size
            imgs = imgs.to(configs.device, non_blocking=True)
            outputs = model(imgs)
            # NOTE(review): post_processing_v2 / get_batch_statistics_rotated_bbox /
            # ap_per_class come from elsewhere in the project (NMS + TP matching).
            outputs = post_processing_v2(outputs, conf_thresh=configs.conf_thresh, nms_thresh=configs.nms_thresh)
            sample_metrics += get_batch_statistics_rotated_bbox(outputs, targets, iou_threshold=configs.iou_thresh)
            # measure elapsed time
            # torch.cuda.synchronize()
            batch_time.update(time.time() - start_time)
            # Log message
            if logger is not None:
                if ((batch_idx + 1) % configs.print_freq) == 0:
                    logger.info(progress.get_message(batch_idx))
            start_time = time.time()
    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)
    return precision, recall, AP, f1, ap_class
def main_worker(gpu_idx, configs):
    """Entry point for a single (possibly mp-spawned) training process.

    Sets up distributed state, builds model/optimizer/scheduler, optionally
    loads/resumes checkpoints, then either evaluates once (configs.evaluate)
    or runs the full training loop with periodic validation and checkpoints.
    """
    configs.gpu_idx = gpu_idx
    configs.device = torch.device('cpu' if configs.gpu_idx is None else 'cuda:{}'.format(configs.gpu_idx))
    if configs.distributed:
        if configs.dist_url == "env://" and configs.rank == -1:
            configs.rank = int(os.environ["RANK"])
        if configs.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            configs.rank = configs.rank * configs.ngpus_per_node + gpu_idx
        dist.init_process_group(backend=configs.dist_backend, init_method=configs.dist_url,
                                world_size=configs.world_size, rank=configs.rank)
        # an effective batch of 64 is emulated via gradient accumulation
        configs.subdivisions = int(64 / configs.batch_size / configs.ngpus_per_node)
    else:
        configs.subdivisions = int(64 / configs.batch_size)
    # only the master node writes logs / tensorboard / checkpoints
    configs.is_master_node = (not configs.distributed) or (
            configs.distributed and (configs.rank % configs.ngpus_per_node == 0))
    if configs.is_master_node:
        logger = Logger(configs.logs_dir, configs.saved_fn)
        logger.info('>>> Created a new logger')
        logger.info('>>> configs: {}'.format(configs))
        tb_writer = SummaryWriter(log_dir=os.path.join(configs.logs_dir, 'tensorboard'))
    else:
        logger = None
        tb_writer = None
    # model
    model = create_model(configs)
    # load weight from a checkpoint
    if configs.pretrained_path is not None:
        assert os.path.isfile(configs.pretrained_path), "=> no checkpoint found at '{}'".format(configs.pretrained_path)
        model.load_state_dict(torch.load(configs.pretrained_path))
        if logger is not None:
            logger.info('loaded pretrained model at {}'.format(configs.pretrained_path))
    # resume weights of model from a checkpoint
    if configs.resume_path is not None:
        assert os.path.isfile(configs.resume_path), "=> no checkpoint found at '{}'".format(configs.resume_path)
        model.load_state_dict(torch.load(configs.resume_path))
        if logger is not None:
            logger.info('resume training model from checkpoint {}'.format(configs.resume_path))
    # Data Parallel
    model = make_data_parallel(model, configs)
    # Make sure to create optimizer after moving the model to cuda
    optimizer = create_optimizer(configs, model)
    lr_scheduler = create_lr_scheduler(optimizer, configs)
    # multi_step schedule steps per iteration; cosine steps per epoch
    configs.step_lr_in_epoch = True if configs.lr_type in ['multi_step'] else False
    # resume optimizer, lr_scheduler from a checkpoint
    if configs.resume_path is not None:
        utils_path = configs.resume_path.replace('Model_', 'Utils_')
        assert os.path.isfile(utils_path), "=> no checkpoint found at '{}'".format(utils_path)
        utils_state_dict = torch.load(utils_path, map_location='cuda:{}'.format(configs.gpu_idx))
        optimizer.load_state_dict(utils_state_dict['optimizer'])
        lr_scheduler.load_state_dict(utils_state_dict['lr_scheduler'])
        configs.start_epoch = utils_state_dict['epoch'] + 1
    if configs.is_master_node:
        num_parameters = get_num_parameters(model)
        logger.info('number of trained parameters of the model: {}'.format(num_parameters))
    if logger is not None:
        logger.info(">>> Loading dataset & getting dataloader...")
    # Create dataloader
    train_dataloader, train_sampler = create_train_dataloader(configs)
    if logger is not None:
        logger.info('number of batches in training set: {}'.format(len(train_dataloader)))
    if configs.evaluate:
        # evaluation-only mode: compute mAP once and exit
        val_dataloader = create_val_dataloader(configs)
        precision, recall, AP, f1, ap_class = evaluate_mAP(val_dataloader, model, configs, None)
        print('Evaluate - precision: {}, recall: {}, AP: {}, f1: {}, ap_class: {}'.format(precision, recall, AP, f1,
                                                                                          ap_class))
        print('mAP {}'.format(AP.mean()))
        return
    for epoch in range(configs.start_epoch, configs.num_epochs + 1):
        if logger is not None:
            logger.info('{}'.format('*-' * 40))
            logger.info('{} {}/{} {}'.format('=' * 35, epoch, configs.num_epochs, '=' * 35))
            logger.info('{}'.format('*-' * 40))
            logger.info('>>> Epoch: [{}/{}]'.format(epoch, configs.num_epochs))
        if configs.distributed:
            # re-seed the sampler so each epoch gets a different shard order
            train_sampler.set_epoch(epoch)
        # train for one epoch
        train_one_epoch(train_dataloader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer)
        if not configs.no_val:
            val_dataloader = create_val_dataloader(configs)
            print('number of batches in val_dataloader: {}'.format(len(val_dataloader)))
            precision, recall, AP, f1, ap_class = evaluate_mAP(val_dataloader, model, configs, logger)
            val_metrics_dict = {
                'precision': precision.mean(),
                'recall': recall.mean(),
                'AP': AP.mean(),
                'f1': f1.mean(),
                'ap_class': ap_class.mean()
            }
            if tb_writer is not None:
                tb_writer.add_scalars('Validation', val_metrics_dict, epoch)
        # Save checkpoint
        if configs.is_master_node and ((epoch % configs.checkpoint_freq) == 0):
            model_state_dict, utils_state_dict = get_saved_state(model, optimizer, lr_scheduler, epoch, configs)
            save_checkpoint(configs.checkpoints_dir, configs.saved_fn, model_state_dict, utils_state_dict, epoch)
        if not configs.step_lr_in_epoch:
            lr_scheduler.step()
            if tb_writer is not None:
                tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], epoch)
    if tb_writer is not None:
        tb_writer.close()
    if configs.distributed:
        cleanup()
6,030 | import os
import argparse
import torch
from easydict import EasyDict as edict
def parse_train_configs():
    """Parse CLI arguments for training and derive runtime paths/devices.

    Returns:
        An EasyDict of all arguments plus `device`, `ngpus_per_node`,
        `pin_memory`, `dataset_dir`, `checkpoints_dir`, `logs_dir`
        (the two output directories are created if missing).
    """
    parser = argparse.ArgumentParser(description='The Implementation of Complex YOLOv4')
    parser.add_argument('--seed', type=int, default=2020,
                        help='re-produce the results with seed random')
    parser.add_argument('--saved_fn', type=str, default='complexer_yolo', metavar='FN',
                        help='The name using for saving logs, models,...')
    parser.add_argument('--working-dir', type=str, default='../', metavar='PATH',
                        help='The ROOT working directory')
    ####################################################################
    ##############     Model configs            ########################
    ####################################################################
    parser.add_argument('-a', '--arch', type=str, default='darknet', metavar='ARCH',
                        help='The name of the model architecture')
    parser.add_argument('--cfgfile', type=str, default='config/cfg/complex_yolov4.cfg', metavar='PATH',
                        help='The path for cfgfile (only for darknet)')
    parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',
                        help='the path of the pretrained checkpoint')
    parser.add_argument('--use_giou_loss', action='store_true',
                        help='If true, use GIoU loss during training. If false, use MSE loss for training')
    ####################################################################
    ##############     Dataloader and Running configs            #######
    ####################################################################
    parser.add_argument('--img_size', type=int, default=608,
                        help='the size of input image')
    parser.add_argument('--hflip_prob', type=float, default=0.5,
                        help='The probability of horizontal flip')
    parser.add_argument('--cutout_prob', type=float, default=0.,
                        help='The probability of cutout augmentation')
    parser.add_argument('--cutout_nholes', type=int, default=1,
                        help='The number of cutout area')
    parser.add_argument('--cutout_ratio', type=float, default=0.3,
                        help='The max ratio of the cutout area')
    parser.add_argument('--cutout_fill_value', type=float, default=0.,
                        help='The fill value in the cut out area, default 0. (black)')
    parser.add_argument('--multiscale_training', action='store_true',
                        help='If true, use scaling data for training')
    parser.add_argument('--mosaic', action='store_true',
                        help='If true, compose training samples as mosaics')
    parser.add_argument('--random-padding', action='store_true',
                        help='If true, random padding if using mosaic augmentation')
    parser.add_argument('--no-val', action='store_true',
                        help='If true, dont evaluate the model on the val set')
    parser.add_argument('--num_samples', type=int, default=None,
                        help='Take a subset of the dataset to run and debug')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='Number of threads for loading data')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='mini-batch size (default: 4), this is the total'
                             'batch size of all GPUs on the current node when using'
                             'Data Parallel or Distributed Data Parallel')
    parser.add_argument('--print_freq', type=int, default=50, metavar='N',
                        help='print frequency (default: 50)')
    parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',
                        help='frequency of saving tensorboard (default: 50)')
    parser.add_argument('--checkpoint_freq', type=int, default=5, metavar='N',
                        help='frequency of saving checkpoints (default: 5)')
    ####################################################################
    ##############     Training strategy            ####################
    ####################################################################
    parser.add_argument('--start_epoch', type=int, default=1, metavar='N',
                        help='the starting epoch')
    parser.add_argument('--num_epochs', type=int, default=300, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--lr_type', type=str, default='cosin',
                        help='the type of learning rate scheduler (cosin or multi_step)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='initial learning rate')
    parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',
                        help='minimum learning rate during training')
    parser.add_argument('--momentum', type=float, default=0.949, metavar='M',
                        help='momentum')
    parser.add_argument('-wd', '--weight_decay', type=float, default=5e-4, metavar='WD',
                        help='weight decay (default: 5e-4)')
    parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',
                        help='the type of optimizer, it can be sgd or adam')
    parser.add_argument('--burn_in', type=int, default=50, metavar='N',
                        help='number of burn in step')
    # FIX: added type=int — without it, CLI-supplied steps were strings, and the
    # multi_step scheduler's `i < configs.steps[0]` comparison raised TypeError.
    parser.add_argument('--steps', nargs='*', type=int, default=[1500, 4000],
                        help='the decay steps of the multi_step lr scheduler')
    ####################################################################
    ##############     Loss weight            ##########################
    ####################################################################
    ####################################################################
    ##############     Distributed Data Parallel            ############
    ####################################################################
    parser.add_argument('--world-size', default=-1, type=int, metavar='N',
                        help='number of nodes for distributed training')
    parser.add_argument('--rank', default=-1, type=int, metavar='N',
                        help='node rank for distributed training')
    parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,
                        help='url used to set up distributed training')
    parser.add_argument('--dist-backend', default='nccl', type=str,
                        help='distributed backend')
    parser.add_argument('--gpu_idx', default=None, type=int,
                        help='GPU index to use.')
    parser.add_argument('--no_cuda', action='store_true',
                        help='If true, cuda is not used.')
    parser.add_argument('--multiprocessing-distributed', action='store_true',
                        help='Use multi-processing distributed training to launch '
                             'N processes per node, which has N GPUs. This is the '
                             'fastest way to use PyTorch for either single node or '
                             'multi node data parallel training')
    ####################################################################
    ##############     Evaluation configurations     ###################
    ####################################################################
    parser.add_argument('--evaluate', action='store_true',
                        help='only evaluate the model, not training')
    parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',
                        help='the path of the resumed checkpoint')
    parser.add_argument('--conf-thresh', type=float, default=0.5,
                        help='for evaluation - the threshold for class conf')
    parser.add_argument('--nms-thresh', type=float, default=0.5,
                        help='for evaluation - the threshold for nms')
    parser.add_argument('--iou-thresh', type=float, default=0.5,
                        help='for evaluation - the threshold for IoU')
    configs = edict(vars(parser.parse_args()))
    ####################################################################
    ############## Hardware configurations #############################
    ####################################################################
    configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')
    configs.ngpus_per_node = torch.cuda.device_count()
    configs.pin_memory = True
    ####################################################################
    ############## Dataset, logs, Checkpoints dir ######################
    ####################################################################
    configs.dataset_dir = os.path.join(configs.working_dir, 'dataset', 'kitti')
    configs.checkpoints_dir = os.path.join(configs.working_dir, 'checkpoints', configs.saved_fn)
    configs.logs_dir = os.path.join(configs.working_dir, 'logs', configs.saved_fn)
    if not os.path.isdir(configs.checkpoints_dir):
        os.makedirs(configs.checkpoints_dir)
    if not os.path.isdir(configs.logs_dir):
        os.makedirs(configs.logs_dir)
    return configs
6,031 | import math
import sys
import cv2
import numpy as np
import config.kitti_config as cnf
def removePoints(PointCloud, BoundaryCond):
    """Keep only the points inside the 3D boundary box and shift z so it starts at 0.

    PointCloud is (N, >=3) with columns x, y, z[, intensity]; BoundaryCond is a
    dict with min/max for each axis.  Returns a new filtered array.
    """
    lower = np.array([BoundaryCond['minX'], BoundaryCond['minY'], BoundaryCond['minZ']])
    upper = np.array([BoundaryCond['maxX'], BoundaryCond['maxY'], BoundaryCond['maxZ']])
    xyz = PointCloud[:, :3]
    # inclusive bounds on every axis
    inside = np.all((xyz >= lower) & (xyz <= upper), axis=1)
    kept = PointCloud[inside]
    # re-base heights so minZ maps to 0 (boolean indexing above made a copy)
    kept[:, 2] = kept[:, 2] - BoundaryCond['minZ']
    return kept
6,032 | import math
import sys
import cv2
import numpy as np
import config.kitti_config as cnf
def makeBVFeature(PointCloud_, Discretization, bc):
    """Rasterize a boundary-filtered point cloud into a 3-channel BEV feature map.

    Channels are built from the top-most point of every grid cell:
    normalized height, raw intensity, and log-normalized point density.
    Returns a (3, BEV_HEIGHT, BEV_WIDTH) array.
    """
    Height = cnf.BEV_HEIGHT + 1
    Width = cnf.BEV_WIDTH + 1
    # Discretize Feature Map
    PointCloud = np.copy(PointCloud_)
    PointCloud[:, 0] = np.int_(np.floor(PointCloud[:, 0] / Discretization))
    # shift y so the grid is centered laterally on the sensor
    PointCloud[:, 1] = np.int_(np.floor(PointCloud[:, 1] / Discretization) + Width / 2)
    # sort-3times: by x, then y, then descending z, so the FIRST point of each
    # (x, y) cell is its highest point
    indices = np.lexsort((-PointCloud[:, 2], PointCloud[:, 1], PointCloud[:, 0]))
    PointCloud = PointCloud[indices]
    # Height Map
    heightMap = np.zeros((Height, Width))
    # np.unique returns the first occurrence per cell == highest point (see sort)
    _, indices = np.unique(PointCloud[:, 0:2], axis=0, return_index=True)
    PointCloud_frac = PointCloud[indices]
    # some important problem is image coordinate is (y,x), not (x,y)
    max_height = float(np.abs(bc['maxZ'] - bc['minZ']))
    heightMap[np.int_(PointCloud_frac[:, 0]), np.int_(PointCloud_frac[:, 1])] = PointCloud_frac[:, 2] / max_height
    # Intensity Map & DensityMap
    intensityMap = np.zeros((Height, Width))
    densityMap = np.zeros((Height, Width))
    _, indices, counts = np.unique(PointCloud[:, 0:2], axis=0, return_index=True, return_counts=True)
    PointCloud_top = PointCloud[indices]
    # density saturates at 64 points per cell (log-base-64 normalization)
    normalizedCounts = np.minimum(1.0, np.log(counts + 1) / np.log(64))
    intensityMap[np.int_(PointCloud_top[:, 0]), np.int_(PointCloud_top[:, 1])] = PointCloud_top[:, 3]
    densityMap[np.int_(PointCloud_top[:, 0]), np.int_(PointCloud_top[:, 1])] = normalizedCounts
    # assemble channels, cropping the +1 padding row/column
    # NOTE(review): the r/g/b labels below look inconsistent with the channel
    # indices — confirm the intended channel order against the consumer.
    RGB_Map = np.zeros((3, Height - 1, Width - 1))
    RGB_Map[2, :, :] = densityMap[:cnf.BEV_HEIGHT, :cnf.BEV_WIDTH]  # r_map
    RGB_Map[1, :, :] = heightMap[:cnf.BEV_HEIGHT, :cnf.BEV_WIDTH]  # g_map
    RGB_Map[0, :, :] = intensityMap[:cnf.BEV_HEIGHT, :cnf.BEV_WIDTH]  # b_map
    return RGB_Map
6,033 | import sys
import torch
from torch.utils.data import DataLoader
from data_process.kitti_dataset import KittiDataset
from data_process.transformation import Compose, OneOf, Random_Rotation, Random_Scaling, Horizontal_Flip, Cutout
class KittiDataset(Dataset):
    """KITTI BEV dataset yielding (img_file, rgb_map[, targets]) samples.

    NOTE(review): `Dataset` is not imported in the visible import block (only
    DataLoader is) — confirm `from torch.utils.data import Dataset` exists.
    """

    def __init__(self, dataset_dir, mode='train', lidar_transforms=None, aug_transforms=None, multiscale=False,
                 num_samples=None, mosaic=False, random_padding=False):
        # dataset_dir: KITTI root with training/ and testing/ sub-folders
        self.dataset_dir = dataset_dir
        assert mode in ['train', 'val', 'test'], 'Invalid mode: {}'.format(mode)
        self.mode = mode
        self.is_test = (self.mode == 'test')
        sub_folder = 'testing' if self.is_test else 'training'
        self.multiscale = multiscale
        self.lidar_transforms = lidar_transforms
        self.aug_transforms = aug_transforms
        # BEV maps are square; img_size doubles as both dimensions
        self.img_size = cnf.BEV_WIDTH
        # multiscale training varies the size by up to +/-3 strides of 32
        self.min_size = self.img_size - 3 * 32
        self.max_size = self.img_size + 3 * 32
        self.batch_count = 0
        self.mosaic = mosaic
        self.random_padding = random_padding
        self.mosaic_border = [-self.img_size // 2, -self.img_size // 2]
        self.lidar_dir = os.path.join(self.dataset_dir, sub_folder, "velodyne")
        self.image_dir = os.path.join(self.dataset_dir, sub_folder, "image_2")
        self.calib_dir = os.path.join(self.dataset_dir, sub_folder, "calib")
        self.label_dir = os.path.join(self.dataset_dir, sub_folder, "label_2")
        split_txt_path = os.path.join(self.dataset_dir, 'ImageSets', '{}.txt'.format(mode))
        self.image_idx_list = [x.strip() for x in open(split_txt_path).readlines()]
        if self.is_test:
            self.sample_id_list = [int(sample_id) for sample_id in self.image_idx_list]
        else:
            # presumably filters out frames without usable labels — see remove_invalid_idx
            self.sample_id_list = self.remove_invalid_idx(self.image_idx_list)
        if num_samples is not None:
            # optional subset for quick debugging runs
            self.sample_id_list = self.sample_id_list[:num_samples]
        self.num_samples = len(self.sample_id_list)
def __getitem__(self, index):
if self.is_test:
return self.load_img_only(index)
else:
if self.mosaic:
img_files, rgb_map, targets = self.load_mosaic(index)
return img_files[0], rgb_map, targets
else:
return self.load_img_with_targets(index)
def load_img_only(self, index):
"""Load only image for the testing phase"""
sample_id = int(self.sample_id_list[index])
lidarData = self.get_lidar(sample_id)
b = kitti_bev_utils.removePoints(lidarData, cnf.boundary)
rgb_map = kitti_bev_utils.makeBVFeature(b, cnf.DISCRETIZATION, cnf.boundary)
img_file = os.path.join(self.image_dir, '{:06d}.png'.format(sample_id))
return img_file, rgb_map
def load_img_with_targets(self, index):
    """Load images and targets for the training and validation phase.

    Returns (camera image path, BEV feature map tensor, targets tensor of
    shape (n, 8)). Column 0 of targets is the batch/box index, left as 0
    here and filled in by ``collate_fn``.
    """
    sample_id = int(self.sample_id_list[index])
    lidarData = self.get_lidar(sample_id)
    objects = self.get_label(sample_id)
    calib = self.get_calib(sample_id)
    labels, noObjectLabels = kitti_bev_utils.read_labels_for_bevbox(objects)
    if not noObjectLabels:
        labels[:, 1:] = transformation.camera_to_lidar_box(labels[:, 1:], calib.V2C, calib.R0,
                                                           calib.P)  # convert rect cam to velo cord
    # Point-cloud-space augmentations run before rasterization.
    if self.lidar_transforms is not None:
        lidarData, labels[:, 1:] = self.lidar_transforms(lidarData, labels[:, 1:])
    b = kitti_bev_utils.removePoints(lidarData, cnf.boundary)
    rgb_map = kitti_bev_utils.makeBVFeature(b, cnf.DISCRETIZATION, cnf.boundary)
    target = kitti_bev_utils.build_yolo_target(labels)
    img_file = os.path.join(self.image_dir, '{:06d}.png'.format(sample_id))
    # on image space: targets are formatted as (box_idx, class, x, y, w, l, im, re)
    n_target = len(target)
    targets = torch.zeros((n_target, 8))
    if n_target > 0:
        targets[:, 1:] = torch.from_numpy(target)
    rgb_map = torch.from_numpy(rgb_map).float()
    # Image-space augmentations operate on the rasterized map and its boxes.
    if self.aug_transforms is not None:
        rgb_map, targets = self.aug_transforms(rgb_map, targets)
    return img_file, rgb_map, targets
def load_mosaic(self, index):
    """loads images in a mosaic
    Refer: https://github.com/ultralytics/yolov5/blob/master/utils/datasets.py

    Stitches the indexed frame plus three random frames into one
    (2*img_size, 2*img_size) canvas and rescales their normalized box
    coordinates into the mosaic frame.
    """
    targets_s4 = []
    img_file_s4 = []
    if self.random_padding:
        yc, xc = [int(random.uniform(-x, 2 * self.img_size + x)) for x in self.mosaic_border]  # mosaic center
    else:
        yc, xc = [self.img_size, self.img_size]  # mosaic center
    indices = [index] + [random.randint(0, self.num_samples - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        img_file, img, targets = self.load_img_with_targets(index)
        img_file_s4.append(img_file)
        c, h, w = img.size()  # (3, 608, 608), torch tensor
        # place img in img4
        if i == 0:  # top left
            # Canvas is created lazily on the first tile, filled with 0.5 (gray).
            img_s4 = torch.full((c, self.img_size * 2, self.img_size * 2), 0.5, dtype=torch.float)
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, self.img_size * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(self.img_size * 2, yc + h)
            # NOTE(review): upstream yolov5 uses plain `w` for x2b; `max(xc, w)` can
            # overshoot the tile width, though tensor slicing clamps it — confirm.
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, self.img_size * 2), min(self.img_size * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
        img_s4[:, y1a:y2a, x1a:x2a] = img[:, y1b:y2b, x1b:x2b]  # img_s4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b
        # on image space: targets are formatted as (box_idx, class, x, y, w, l, sin(yaw), cos(yaw))
        if targets.size(0) > 0:
            # Rescale normalized per-tile coords into the 2x-sized mosaic frame.
            targets[:, 2] = (targets[:, 2] * w + padw) / (2 * self.img_size)
            targets[:, 3] = (targets[:, 3] * h + padh) / (2 * self.img_size)
            targets[:, 4] = targets[:, 4] * w / (2 * self.img_size)
            targets[:, 5] = targets[:, 5] * h / (2 * self.img_size)
        targets_s4.append(targets)
    if len(targets_s4) > 0:
        targets_s4 = torch.cat(targets_s4, 0)
        # Keep box centers inside the canvas.
        torch.clamp(targets_s4[:, 2:4], min=0., max=(1. - 0.5 / self.img_size), out=targets_s4[:, 2:4])
    return img_file_s4, img_s4, targets_s4
def __len__(self):
    # Dataset size == number of (filtered) sample ids.
    return len(self.sample_id_list)
def remove_invalid_idx(self, image_idx_list):
    """Discard samples which don't have current training class objects, which will not be used for training.

    A sample is kept when at least one labeled object belongs to a known
    class id AND its center lies inside the configured point-cloud boundary.
    """
    sample_id_list = []
    for sample_id in image_idx_list:
        sample_id = int(sample_id)
        objects = self.get_label(sample_id)
        calib = self.get_calib(sample_id)
        labels, noObjectLabels = kitti_bev_utils.read_labels_for_bevbox(objects)
        if not noObjectLabels:
            labels[:, 1:] = transformation.camera_to_lidar_box(labels[:, 1:], calib.V2C, calib.R0,
                                                               calib.P)  # convert rect cam to velo cord
        valid_list = []
        for i in range(labels.shape[0]):
            if int(labels[i, 0]) in cnf.CLASS_NAME_TO_ID.values():
                # labels[i, 1:4] is the (x, y, z) box center in velodyne coords.
                if self.check_point_cloud_range(labels[i, 1:4]):
                    valid_list.append(labels[i, 0])
        if len(valid_list) > 0:
            sample_id_list.append(sample_id)
    return sample_id_list
def check_point_cloud_range(self, xyz):
    """Return True if the (x, y, z) location lies inside the configured BEV boundary.

    :param xyz: [x, y, z]
    :return: bool
    """
    bound = cnf.boundary
    in_x = bound["minX"] <= xyz[0] <= bound["maxX"]
    in_y = bound["minY"] <= xyz[1] <= bound["maxY"]
    in_z = bound["minZ"] <= xyz[2] <= bound["maxZ"]
    return in_x and in_y and in_z
def collate_fn(self, batch):
    """Batch samples: stack BEV maps, concatenate targets, optionally resample size.

    Returns (paths tuple, imgs tensor (B, C, S, S), targets tensor (N, 8))
    where targets[:, 0] holds each box's index within the batch.
    """
    paths, imgs, targets = list(zip(*batch))
    # Remove empty placeholder targets
    targets = [boxes for boxes in targets if boxes is not None]
    # Add sample index to targets
    for i, boxes in enumerate(targets):
        boxes[:, 0] = i
    targets = torch.cat(targets, 0)
    # Selects new image size every tenth batch
    if (self.batch_count % 10 == 0) and self.multiscale and (not self.mosaic):
        # Sizes are kept divisible by 32 (the network stride).
        self.img_size = random.choice(range(self.min_size, self.max_size + 1, 32))
    # Resize images to input shape
    imgs = torch.stack(imgs)
    if self.img_size != cnf.BEV_WIDTH:
        imgs = F.interpolate(imgs, size=self.img_size, mode="bilinear", align_corners=True)
    self.batch_count += 1
    return paths, imgs, targets
def get_image(self, idx):
    """Read the camera image for sample ``idx``; returns None if the file is unreadable."""
    img_file = os.path.join(self.image_dir, '{:06d}.png'.format(idx))
    # assert os.path.isfile(img_file)
    return cv2.imread(img_file)  # (H, W, C) -> (H, W, 3) OpenCV reads in BGR mode
def get_lidar(self, idx):
    """Load the raw velodyne scan for sample ``idx`` as an (N, 4) float32 array."""
    lidar_file = os.path.join(self.lidar_dir, '{:06d}.bin'.format(idx))
    # assert os.path.isfile(lidar_file)
    points = np.fromfile(lidar_file, dtype=np.float32)
    return points.reshape((-1, 4))
def get_calib(self, idx):
    """Parse the KITTI calibration file for sample ``idx`` into a Calibration object."""
    calib_file = os.path.join(self.calib_dir, '{:06d}.txt'.format(idx))
    # assert os.path.isfile(calib_file)
    return kitti_data_utils.Calibration(calib_file)
def get_label(self, idx):
    """Read the KITTI label file for sample ``idx`` as a list of parsed objects."""
    label_file = os.path.join(self.label_dir, '{:06d}.txt'.format(idx))
    # assert os.path.isfile(label_file)
    return kitti_data_utils.read_label(label_file)
The provided code snippet includes necessary dependencies for implementing the `create_test_dataloader` function. Write a Python function `def create_test_dataloader(configs)` to solve the following problem:
Create dataloader for testing phase
Here is the function:
def create_test_dataloader(configs):
    """Create dataloader for testing phase.

    No shuffling or augmentation; a DistributedSampler is attached when
    ``configs.distributed`` is set so each rank sees a disjoint shard.
    """
    test_dataset = KittiDataset(configs.dataset_dir, mode='test', lidar_transforms=None, aug_transforms=None,
                                multiscale=False, num_samples=configs.num_samples, mosaic=False, random_padding=False)
    test_sampler = None
    if configs.distributed:
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, batch_size=configs.batch_size, shuffle=False,
                                 pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=test_sampler)
    return test_dataloader
6,034 | import sys
import math
import numpy as np
import torch
from config import kitti_config as cnf
def camera_to_lidar_point(points):
    """Map rectified-camera points (N, 3) into velodyne coordinates (N, 3)."""
    num_pts = points.shape[0]
    # Homogeneous column-vector form: (N, 3) -> (4, N).
    homo = np.hstack([points, np.ones((num_pts, 1))]).T
    homo = np.matmul(cnf.R0_inv, homo)
    homo = np.matmul(cnf.Tr_velo_to_cam_inv, homo).T  # back to (N, 4)
    return homo[:, 0:3].reshape(-1, 3)
6,035 | import sys
import math
import numpy as np
import torch
from config import kitti_config as cnf
def center_to_corner_box3d(boxes_center, coordinate='lidar'):
    # (N, 7) -> (N, 8, 3)
    # Boxes are (x, y, z, h, w, l, yaw); camera-frame input is converted to
    # lidar, corners are built around the z=0 bottom face, then converted back.
    N = boxes_center.shape[0]
    ret = np.zeros((N, 8, 3), dtype=np.float32)
    if coordinate == 'camera':
        boxes_center = camera_to_lidar_box(boxes_center)
    for i in range(N):
        box = boxes_center[i]
        translation = box[0:3]
        size = box[3:6]
        rotation = [0, 0, box[-1]]
        h, w, l = size[0], size[1], size[2]
        trackletBox = np.array([  # in velodyne coordinates around zero point and without orientation yet
            [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \
            [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \
            [0, 0, 0, 0, h, h, h, h]])
        # re-create 3D bounding box in velodyne coordinate system
        yaw = rotation[2]
        rotMat = np.array([
            [np.cos(yaw), -np.sin(yaw), 0.0],
            [np.sin(yaw), np.cos(yaw), 0.0],
            [0.0, 0.0, 1.0]])
        cornerPosInVelo = np.dot(rotMat, trackletBox) + \
                          np.tile(translation, (8, 1)).T
        box3d = cornerPosInVelo.transpose()
        ret[i] = box3d
    if coordinate == 'camera':
        for idx in range(len(ret)):
            ret[idx] = lidar_to_camera_point(ret[idx])
    return ret
def center_to_corner_box2d(boxes_center, coordinate='lidar'):
    # (N, 5) -> (N, 4, 2)
    # 2D boxes (x, y, w, l, yaw) are lifted to degenerate 3D boxes (z=h=0),
    # expanded to 8 corners, then the bottom face's (x, y) is returned.
    N = boxes_center.shape[0]
    boxes3d_center = np.zeros((N, 7))
    boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center
    boxes3d_corner = center_to_corner_box3d(
        boxes3d_center, coordinate=coordinate)
    return boxes3d_corner[:, 0:4, 0:2]
6,036 | import sys
import math
import numpy as np
import torch
from config import kitti_config as cnf
def center_to_corner_box3d(boxes_center, coordinate='lidar'):
    # (N, 7) -> (N, 8, 3)
    # Boxes are (x, y, z, h, w, l, yaw); camera-frame input is converted to
    # lidar, corners are built around the z=0 bottom face, then converted back.
    N = boxes_center.shape[0]
    ret = np.zeros((N, 8, 3), dtype=np.float32)
    if coordinate == 'camera':
        boxes_center = camera_to_lidar_box(boxes_center)
    for i in range(N):
        box = boxes_center[i]
        translation = box[0:3]
        size = box[3:6]
        rotation = [0, 0, box[-1]]
        h, w, l = size[0], size[1], size[2]
        trackletBox = np.array([  # in velodyne coordinates around zero point and without orientation yet
            [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \
            [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \
            [0, 0, 0, 0, h, h, h, h]])
        # re-create 3D bounding box in velodyne coordinate system
        yaw = rotation[2]
        rotMat = np.array([
            [np.cos(yaw), -np.sin(yaw), 0.0],
            [np.sin(yaw), np.cos(yaw), 0.0],
            [0.0, 0.0, 1.0]])
        cornerPosInVelo = np.dot(rotMat, trackletBox) + \
                          np.tile(translation, (8, 1)).T
        box3d = cornerPosInVelo.transpose()
        ret[i] = box3d
    if coordinate == 'camera':
        for idx in range(len(ret)):
            ret[idx] = lidar_to_camera_point(ret[idx])
    return ret
def corner_to_center_box3d(boxes_corner, coordinate='camera'):
    # (N, 8, 3) -> (N, 7) x,y,z,h,w,l,ry/z
    # NOTE(review): CORNER2CENTER_AVG is not defined in this snippet — it must
    # be a module-level flag elsewhere in the file; verify it exists before use.
    if coordinate == 'lidar':
        for idx in range(len(boxes_corner)):
            boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx])
    ret = []
    for roi in boxes_corner:
        if CORNER2CENTER_AVG:  # average version
            # h/w/l are averaged over the four parallel edges; yaw over eight
            # edge directions (atan2 terms).
            roi = np.array(roi)
            h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4)
            w = np.sum(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
            ) / 4
            l = np.sum(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
            ) / 4
            x = np.sum(roi[:, 0], axis=0) / 8
            y = np.sum(roi[0:4, 1], axis=0) / 4
            z = np.sum(roi[:, 2], axis=0) / 8
            ry = np.sum(
                math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
                math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
                math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
                math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
                math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
                math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
                math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
                math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
            ) / 8
            # Normalize so that l >= w, rotating yaw by 90 degrees to compensate.
            if w > l:
                w, l = l, w
                ry = ry - np.pi / 2
            elif l > w:
                l, w = w, l
                ry = ry - np.pi / 2
            ret.append([x, y, z, h, w, l, ry])
        else:  # max version
            h = max(abs(roi[:4, 1] - roi[4:, 1]))
            w = np.max(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
            )
            l = np.max(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
            )
            x = np.sum(roi[:, 0], axis=0) / 8
            y = np.sum(roi[0:4, 1], axis=0) / 4
            z = np.sum(roi[:, 2], axis=0) / 8
            ry = np.sum(
                math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
                math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
                math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
                math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
                math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
                math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
                math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
                math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
            ) / 8
            if w > l:
                w, l = l, w
                ry = angle_in_limit(ry + np.pi / 2)
            ret.append([x, y, z, h, w, l, ry])
    if coordinate == 'lidar':
        ret = camera_to_lidar_box(np.array(ret))
    return np.array(ret)
def point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):
    """Translate, then rotate an (N, 3) point set; angles are in radians.

    Uses the row-vector convention of the original implementation
    (p' = p @ M), applying the translation first and then the x/y/z
    rotations in that order.

    :param points: (N, 3) array
    :return: (N, 3) transformed array
    """
    num_pts = points.shape[0]
    homo = np.hstack([points, np.ones((num_pts, 1))])  # (N, 4) homogeneous
    translate = np.eye(4)
    translate[3, 0:3] = tx, ty, tz
    homo = np.matmul(homo, translate)
    if rx != 0:
        c, s = np.cos(rx), np.sin(rx)
        rot = np.zeros((4, 4))
        rot[0, 0] = rot[3, 3] = 1
        rot[1, 1], rot[1, 2] = c, -s
        rot[2, 1], rot[2, 2] = s, c
        homo = np.matmul(homo, rot)
    if ry != 0:
        c, s = np.cos(ry), np.sin(ry)
        rot = np.zeros((4, 4))
        rot[1, 1] = rot[3, 3] = 1
        rot[0, 0], rot[0, 2] = c, s
        rot[2, 0], rot[2, 2] = -s, c
        homo = np.matmul(homo, rot)
    if rz != 0:
        c, s = np.cos(rz), np.sin(rz)
        rot = np.zeros((4, 4))
        rot[2, 2] = rot[3, 3] = 1
        rot[0, 0], rot[0, 1] = c, -s
        rot[1, 0], rot[1, 1] = s, c
        homo = np.matmul(homo, rot)
    return homo[:, 0:3]
def box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):
    # Input:
    #   boxes: (N, 7) x y z h w l rz/y
    # Output:
    #   boxes: (N, 7) x y z h w l rz/y
    # Transform boxes by going through corner representation: rotate about z
    # in lidar frame, about y in camera frame, then refit center boxes.
    boxes_corner = center_to_corner_box3d(
        boxes, coordinate=coordinate)  # (N, 8, 3)
    for idx in range(len(boxes_corner)):
        if coordinate == 'lidar':
            boxes_corner[idx] = point_transform(
                boxes_corner[idx], tx, ty, tz, rz=r)
        else:
            boxes_corner[idx] = point_transform(
                boxes_corner[idx], tx, ty, tz, ry=r)
    return corner_to_center_box3d(boxes_corner, coordinate=coordinate)
6,037 | from __future__ import print_function
import numpy as np
import cv2
def rotx(t):
    """Return the 3x3 rotation matrix about the x-axis by angle ``t`` (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])
6,038 | from __future__ import print_function
import numpy as np
import cv2
def rotz(t):
    """Return the 3x3 rotation matrix about the z-axis by angle ``t`` (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
6,039 | from __future__ import print_function
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `transform_from_rot_trans` function. Write a Python function `def transform_from_rot_trans(R, t)` to solve the following problem:
Transformation matrix from rotation matrix and translation vector.
Here is the function:
def transform_from_rot_trans(R, t):
    """Build a 4x4 homogeneous transformation matrix from rotation R and translation t.

    Fixes the docstring typo ("Transforation").

    :param R: rotation, reshaped to (3, 3)
    :param t: translation, reshaped to (3, 1)
    :return: (4, 4) matrix [[R, t], [0, 0, 0, 1]]
    """
    R = R.reshape(3, 3)
    t = t.reshape(3, 1)
    return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
6,040 | from __future__ import print_function
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `inverse_rigid_trans` function. Write a Python function `def inverse_rigid_trans(Tr)` to solve the following problem:
Inverse a rigid body transform matrix (3x4 as [R|t]) [R'|-R't; 0|1]
Here is the function:
def inverse_rigid_trans(Tr):
    """Invert a rigid-body transform given as a 3x4 [R|t] matrix.

    Returns [R^T | -R^T t] with the same (3, 4) layout and dtype as the input.
    """
    R = Tr[0:3, 0:3]
    t = Tr[0:3, 3]
    inv_Tr = np.zeros_like(Tr)
    inv_Tr[0:3, 0:3] = np.transpose(R)
    inv_Tr[0:3, 3] = np.dot(-np.transpose(R), t)
    return inv_Tr
6,041 | from __future__ import print_function
import numpy as np
import cv2
class Object3d(object):
    ''' 3d object label

    Parses one line of a KITTI label (or result) file into named fields:
    class, truncation/occlusion flags, 2D image box, 3D box (h, w, l, camera
    location, yaw) and an optional detection score.
    '''

    def __init__(self, label_file_line):
        data = label_file_line.split(' ')
        data[1:] = [float(x) for x in data[1:]]
        # extract label, truncation, occlusion
        self.type = data[0]  # 'Car', 'Pedestrian', ...
        self.cls_id = self.cls_type_to_id(self.type)
        self.truncation = data[1]  # truncated pixel ratio [0..1]
        self.occlusion = int(data[2])  # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
        self.alpha = data[3]  # object observation angle [-pi..pi]
        # extract 2d bounding box in 0-based coordinates
        self.xmin = data[4]  # left
        self.ymin = data[5]  # top
        self.xmax = data[6]  # right
        self.ymax = data[7]  # bottom
        self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
        # extract 3d bounding box information
        self.h = data[8]  # box height
        self.w = data[9]  # box width
        self.l = data[10]  # box length (in meters)
        self.t = (data[11], data[12], data[13])  # location (x,y,z) in camera coord.
        self.dis_to_cam = np.linalg.norm(self.t)
        self.ry = data[14]  # yaw angle (around Y-axis in camera coordinates) [-pi..pi]
        # A 16th field (detection score) is only present in result files.
        self.score = data[15] if data.__len__() == 16 else -1.0
        self.level_str = None
        self.level = self.get_obj_level()

    def cls_type_to_id(self, cls_type):
        """Map a KITTI class name to a training class id; unknown classes -> -1."""
        # Car and Van ==> Car class
        # Pedestrian and Person_Sitting ==> Pedestrian Class
        CLASS_NAME_TO_ID = {
            'Car': 0,
            'Pedestrian': 1,
            'Cyclist': 2,
            'Van': 0,
            'Person_sitting': 1
        }
        if cls_type not in CLASS_NAME_TO_ID.keys():
            return -1
        return CLASS_NAME_TO_ID[cls_type]

    def get_obj_level(self):
        """Classify detection difficulty (KITTI Easy/Moderate/Hard) from box height, truncation and occlusion."""
        height = float(self.box2d[3]) - float(self.box2d[1]) + 1
        if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
            self.level_str = 'Easy'
            return 1  # Easy
        elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
            self.level_str = 'Moderate'
            return 2  # Moderate
        elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
            self.level_str = 'Hard'
            return 3  # Hard
        else:
            self.level_str = 'UnKnown'
            return 4

    def print_object(self):
        """Print all fields in human-readable form (debugging helper)."""
        print('Type, truncation, occlusion, alpha: %s, %d, %d, %f' % \
              (self.type, self.truncation, self.occlusion, self.alpha))
        print('2d bbox (x0,y0,x1,y1): %f, %f, %f, %f' % \
              (self.xmin, self.ymin, self.xmax, self.ymax))
        print('3d bbox h,w,l: %f, %f, %f' % \
              (self.h, self.w, self.l))
        print('3d bbox location, ry: (%f, %f, %f), %f' % \
              (self.t[0], self.t[1], self.t[2], self.ry))

    def to_kitti_format(self):
        """Serialize the object back into a single KITTI result-file line (including score)."""
        kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
                    % (self.type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
                       self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.t[0], self.t[1], self.t[2],
                       self.ry, self.score)
        return kitti_str
def read_label(label_filename):
    """Parse a KITTI label file into a list of Object3d, one per line.

    Fix: the file is now opened via a context manager; the original leaked
    the open file handle.
    """
    with open(label_filename) as f:
        lines = [line.rstrip() for line in f]
    return [Object3d(line) for line in lines]
6,042 | from __future__ import print_function
import numpy as np
import cv2
def load_image(img_filename):
    # OpenCV loads (H, W, 3) in BGR channel order; returns None if the path is unreadable.
    return cv2.imread(img_filename)
6,043 | from __future__ import print_function
import numpy as np
import cv2
def load_velo_scan(velo_filename):
    """Read a KITTI velodyne .bin file into an (N, 4) float32 array of (x, y, z, reflectance)."""
    raw = np.fromfile(velo_filename, dtype=np.float32)
    return raw.reshape((-1, 4))
6,044 | import sys
import torch
from utils.torch_utils import convert2cpu
def parse_cfg(cfgfile):
    """Parse a darknet .cfg file into a list of block dicts.

    Each '[section]' header starts a new dict carrying its name under 'type';
    subsequent 'key=value' lines are stored as string entries. A key literally
    named 'type' is renamed '_type' so it cannot clobber the section type.
    'convolutional' blocks get a default batch_normalize=0.

    Fix: the file is now opened with a context manager so the handle is
    closed even if parsing raises; the manual readline loop is replaced by
    idiomatic file iteration (same semantics).
    """
    blocks = []
    block = None
    with open(cfgfile, 'r') as fp:
        for raw in fp:
            line = raw.rstrip()
            if line == '' or line[0] == '#':
                continue
            if line[0] == '[':
                if block:
                    blocks.append(block)
                block = dict()
                block['type'] = line.lstrip('[').rstrip(']')
                # set default value
                if block['type'] == 'convolutional':
                    block['batch_normalize'] = 0
            else:
                key, value = line.split('=')
                key = key.strip()
                if key == 'type':
                    key = '_type'
                block[key] = value.strip()
    if block:
        blocks.append(block)
    return blocks
6,045 | import sys
import torch
sys.path.append('../')
from utils.torch_utils import convert2cpu
def print_cfg(blocks):
    """Print a layer-by-layer summary (shapes and channels) of parsed darknet blocks.

    Fix: the 'maxpool' and 'avgpool' branches previously read the variable
    ``filters`` which was only bound inside the 'convolutional' branch — a
    NameError when pooling preceded any conv layer, and a stale value
    otherwise. Pooling preserves the channel count, so ``filters`` is now
    set from ``prev_filters`` in those branches.
    """
    print('layer     filters    size              input                output')
    prev_width = 416
    prev_height = 416
    prev_filters = 3
    out_filters = []
    out_widths = []
    out_heights = []
    ind = -2
    for block in blocks:
        ind = ind + 1
        if block['type'] == 'net':
            prev_width = int(block['width'])
            prev_height = int(block['height'])
            continue
        elif block['type'] == 'convolutional':
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            stride = int(block['stride'])
            is_pad = int(block['pad'])
            pad = (kernel_size - 1) // 2 if is_pad else 0
            width = (prev_width + 2 * pad - kernel_size) // stride + 1
            height = (prev_height + 2 * pad - kernel_size) // stride + 1
            print('%5d %-6s %4d  %d x %d / %d   %3d x %3d x%4d   ->   %3d x %3d x%4d' % (
                ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width,
                height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'maxpool':
            pool_size = int(block['size'])
            stride = int(block['stride'])
            width = prev_width // stride
            height = prev_height // stride
            filters = prev_filters  # pooling keeps the channel count (was stale/undefined)
            print('%5d %-6s       %d x %d / %d   %3d x %3d x%4d   ->   %3d x %3d x%4d' % (
                ind, 'max', pool_size, pool_size, stride, prev_width, prev_height, prev_filters, width, height,
                filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'avgpool':
            width = 1
            height = 1
            filters = prev_filters  # global pooling keeps the channel count (was stale/undefined)
            print('%5d %-6s                   %3d x %3d x%4d   ->  %3d' % (
                ind, 'avg', prev_width, prev_height, prev_filters, prev_filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'softmax':
            print('%5d %-6s                                    ->  %3d' % (ind, 'softmax', prev_filters))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'cost':
            print('%5d %-6s                                     ->  %3d' % (ind, 'cost', prev_filters))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'reorg':
            stride = int(block['stride'])
            filters = stride * stride * prev_filters
            width = prev_width // stride
            height = prev_height // stride
            print('%5d %-6s             / %d   %3d x %3d x%4d   ->   %3d x %3d x%4d' % (
                ind, 'reorg', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'upsample':
            stride = int(block['stride'])
            filters = prev_filters
            width = prev_width * stride
            height = prev_height * stride
            print('%5d %-6s           * %d   %3d x %3d x%4d   ->   %3d x %3d x%4d' % (
                ind, 'upsample', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'route':
            # Negative layer indices are relative to the current layer.
            layers = block['layers'].split(',')
            layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
            if len(layers) == 1:
                print('%5d %-6s %d' % (ind, 'route', layers[0]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                prev_filters = out_filters[layers[0]]
            elif len(layers) == 2:
                print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                assert (prev_width == out_widths[layers[1]])
                assert (prev_height == out_heights[layers[1]])
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
            elif len(layers) == 4:
                print('%5d %-6s %d %d %d %d' % (ind, 'route', layers[0], layers[1], layers[2], layers[3]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                assert (prev_width == out_widths[layers[1]] == out_widths[layers[2]] == out_widths[layers[3]])
                assert (prev_height == out_heights[layers[1]] == out_heights[layers[2]] == out_heights[layers[3]])
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]] + out_filters[layers[2]] + out_filters[
                    layers[3]]
            else:
                print("route error !!! {} {} {}".format(sys._getframe().f_code.co_filename,
                                                        sys._getframe().f_code.co_name, sys._getframe().f_lineno))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] in ['region', 'yolo']:
            print('%5d %-6s' % (ind, 'detection'))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'shortcut':
            from_id = int(block['from'])
            from_id = from_id if from_id > 0 else from_id + ind
            print('%5d %-6s %d' % (ind, 'shortcut', from_id))
            prev_width = out_widths[from_id]
            prev_height = out_heights[from_id]
            prev_filters = out_filters[from_id]
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'connected':
            filters = int(block['output'])
            print('%5d %-6s                            %d  ->  %3d' % (ind, 'connected', prev_filters, filters))
            prev_filters = filters
            out_widths.append(1)
            out_heights.append(1)
            out_filters.append(prev_filters)
        else:
            print('unknown type %s' % (block['type']))
6,046 | import sys
import torch
from utils.torch_utils import convert2cpu
def load_conv(buf, start, conv_model):
    """Copy bias then weight for a conv layer out of a flat darknet weight buffer.

    :param buf: 1-D numpy array of float32 weights
    :param start: read offset into ``buf``
    :return: the new read offset after this layer
    """
    n_bias = conv_model.bias.numel()
    n_weight = conv_model.weight.numel()
    conv_model.bias.data.copy_(torch.from_numpy(buf[start:start + n_bias]))
    start += n_bias
    conv_model.weight.data.copy_(
        torch.from_numpy(buf[start:start + n_weight]).reshape(conv_model.weight.data.shape))
    return start + n_weight
6,047 | import sys
import torch
from utils.torch_utils import convert2cpu
def convert2cpu(gpu_matrix):
    """Return a CPU FloatTensor copy of the given tensor."""
    cpu_copy = torch.FloatTensor(gpu_matrix.size())
    return cpu_copy.copy_(gpu_matrix)
def save_conv(fp, conv_model):
    """Append a conv layer's bias then weight (darknet order) to an open binary file."""
    bias, weight = conv_model.bias.data, conv_model.weight.data
    if bias.is_cuda:
        bias, weight = convert2cpu(bias), convert2cpu(weight)
    bias.numpy().tofile(fp)
    weight.numpy().tofile(fp)
6,048 | import sys
import torch
from utils.torch_utils import convert2cpu
def load_conv_bn(buf, start, conv_model, bn_model):
    """Read BN params (bias, weight, running_mean, running_var) then conv weights.

    Follows darknet's serialization order; returns the new read offset.
    """
    n_w = conv_model.weight.numel()
    n_b = bn_model.bias.numel()
    for dst in (bn_model.bias.data, bn_model.weight.data, bn_model.running_mean, bn_model.running_var):
        dst.copy_(torch.from_numpy(buf[start:start + n_b]))
        start += n_b
    conv_model.weight.data.copy_(
        torch.from_numpy(buf[start:start + n_w]).reshape(conv_model.weight.data.shape))
    return start + n_w
6,049 | import sys
import torch
from utils.torch_utils import convert2cpu
def convert2cpu(gpu_matrix):
    """Copy the given tensor into a freshly allocated CPU FloatTensor and return it."""
    host = torch.FloatTensor(gpu_matrix.size())
    host.copy_(gpu_matrix)
    return host
def save_conv_bn(fp, conv_model, bn_model):
    """Write BN (bias, weight, running mean/var) then conv weights in darknet order."""
    tensors = [bn_model.bias.data,
               bn_model.weight.data,
               bn_model.running_mean,
               bn_model.running_var,
               conv_model.weight.data]
    if bn_model.bias.is_cuda:
        tensors = [convert2cpu(t) for t in tensors]
    for t in tensors:
        t.numpy().tofile(fp)
6,050 | import sys
import torch
from utils.torch_utils import convert2cpu
def load_fc(buf, start, fc_model):
    """Read a fully-connected layer (bias then weight) from a darknet buffer.

    NOTE(review): the weight slice is copied without an explicit reshape, so
    this relies on copy_ broadcasting — confirm it matches the layer shape
    for non-trivial Linear layers.
    """
    n_b = fc_model.bias.numel()
    n_w = fc_model.weight.numel()
    fc_model.bias.data.copy_(torch.from_numpy(buf[start:start + n_b]))
    start += n_b
    fc_model.weight.data.copy_(torch.from_numpy(buf[start:start + n_w]))
    return start + n_w
6,051 | import sys
import torch
from utils.torch_utils import convert2cpu
def save_fc(fp, fc_model):
    """Write a fully-connected layer's bias then weight to an open binary file.

    NOTE(review): unlike save_conv, this does not move CUDA tensors to the
    CPU first — confirm callers only pass CPU models.
    """
    for t in (fc_model.bias.data, fc_model.weight.data):
        t.numpy().tofile(fp)
6,052 | import argparse
import os
import time
import numpy as np
import sys
import warnings
import torch
import torch.utils.data.distributed
from tqdm import tqdm
from easydict import EasyDict as edict
from data_process.kitti_dataloader import create_val_dataloader
from models.model_utils import create_model
from utils.misc import AverageMeter, ProgressMeter
from utils.evaluation_utils import post_processing, get_batch_statistics_rotated_bbox, ap_per_class, load_classes, post_processing_v2
def parse_eval_configs():
    """Build the evaluation configuration from command-line arguments.

    Returns an attribute-style dict (EasyDict) with CLI options plus
    derived paths (working_dir, dataset_dir) and pin_memory enabled.
    """
    parser = argparse.ArgumentParser(description='Demonstration config for Complex YOLO Implementation')
    parser.add_argument('--classnames-infor-path', type=str, default='../dataset/kitti/classes_names.txt',
                        metavar='PATH', help='The class names of objects in the task')
    parser.add_argument('-a', '--arch', type=str, default='darknet', metavar='ARCH',
                        help='The name of the model architecture')
    parser.add_argument('--cfgfile', type=str, default='./config/cfg/complex_yolov4.cfg', metavar='PATH',
                        help='The path for cfgfile (only for darknet)')
    parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',
                        help='the path of the pretrained checkpoint')
    parser.add_argument('--use_giou_loss', action='store_true',
                        help='If true, use GIoU loss during training. If false, use MSE loss for training')
    parser.add_argument('--no_cuda', action='store_true',
                        help='If true, cuda is not used.')
    parser.add_argument('--gpu_idx', default=None, type=int,
                        help='GPU index to use.')
    parser.add_argument('--img_size', type=int, default=608,
                        help='the size of input image')
    parser.add_argument('--num_samples', type=int, default=None,
                        help='Take a subset of the dataset to run and debug')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='Number of threads for loading data')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='mini-batch size (default: 4)')
    parser.add_argument('--conf-thresh', type=float, default=0.5,
                        help='for evaluation - the threshold for class conf')
    parser.add_argument('--nms-thresh', type=float, default=0.5,
                        help='for evaluation - the threshold for nms')
    parser.add_argument('--iou-thresh', type=float, default=0.5,
                        help='for evaluation - the threshold for IoU')
    configs = edict(vars(parser.parse_args()))
    configs.pin_memory = True
    ####################################################################
    ##############Dataset, Checkpoints, and results dir configs#########
    ####################################################################
    configs.working_dir = '../'
    configs.dataset_dir = os.path.join(configs.working_dir, 'dataset', 'kitti')
    return configs
6,053 | import torch
def convert2cpu_long(gpu_matrix):
    """Return a CPU LongTensor copy of the given tensor."""
    cpu_copy = torch.LongTensor(gpu_matrix.size())
    cpu_copy.copy_(gpu_matrix)
    return cpu_copy
6,054 | import torch
def to_cpu(tensor):
    """Detach a tensor from the autograd graph and move it to host memory."""
    detached = tensor.detach()
    return detached.cpu()
6,055 | from __future__ import division
import sys
import tqdm
import torch
import numpy as np
from shapely.geometry import Polygon
import data_process.kitti_bev_utils as bev_utils
The provided code snippet includes necessary dependencies for implementing the `load_classes` function. Write a Python function `def load_classes(path)` to solve the following problem:
Loads class labels at 'path'
Here is the function:
def load_classes(path):
    """Load newline-separated class names from *path*.

    Keeps the original semantics: the text is split on '\\n' and the final
    element (the empty string after the trailing newline) is dropped.

    Fix: the file is now opened via a context manager; the original leaked
    the open file handle.
    """
    with open(path, "r") as fp:
        names = fp.read().split("\n")[:-1]
    return names
6,056 | from __future__ import division
import sys
import tqdm
import torch
import numpy as np
from shapely.geometry import Polygon
import data_process.kitti_bev_utils as bev_utils
The provided code snippet includes necessary dependencies for implementing the `rescale_boxes` function. Write a Python function `def rescale_boxes(boxes, current_dim, original_shape)` to solve the following problem:
Rescales bounding boxes to the original shape
Here is the function:
def rescale_boxes(boxes, current_dim, original_shape):
    """Rescale (x1, y1, x2, y2) boxes from the padded square network input
    back to the original image shape. Modifies *boxes* in place and returns it."""
    orig_h, orig_w = original_shape
    longest_side = max(original_shape)
    # Padding that was added on each axis to square the image before resizing.
    pad_x = max(orig_h - orig_w, 0) * (current_dim / longest_side)
    pad_y = max(orig_w - orig_h, 0) * (current_dim / longest_side)
    # Effective image extent once the padding is removed.
    unpad_w = current_dim - pad_x
    unpad_h = current_dim - pad_y
    for col in (0, 2):  # x-coordinates
        boxes[:, col] = ((boxes[:, col] - pad_x // 2) / unpad_w) * orig_w
    for col in (1, 3):  # y-coordinates
        boxes[:, col] = ((boxes[:, col] - pad_y // 2) / unpad_h) * orig_h
    return boxes
6,057 | from __future__ import division
import sys
import tqdm
import torch
import numpy as np
from shapely.geometry import Polygon
import data_process.kitti_bev_utils as bev_utils
def nms_cpu(boxes, confs, nms_thresh=0.5):
    """
    Greedy rotated-box non-maximum suppression on the CPU.

    :param boxes: [num, 6] array of (x, y, w, l, im, re)
    :param confs: [num] confidence per box
    :param nms_thresh: IoU above which a lower-confidence box is suppressed
    :return: indices of kept boxes, highest confidence first
    """
    # Process boxes from highest to lowest confidence.
    order = confs.argsort()[::-1]
    x, y, w, l, im, re = boxes.transpose(1, 0)
    yaw = np.arctan2(im, re)
    corners = get_corners_vectorize(x, y, w, l, yaw)
    polygons = [cvt_box_2_polygon(c) for c in corners]  # 4 vertices of each box
    areas = w * l
    keep = []
    while order.size > 0:
        current, rest = order[0], order[1:]
        keep.append(current)
        overlaps = compute_iou_nms(current, rest, polygons, areas)
        survivors = np.where(overlaps <= nms_thresh)[0]
        order = rest[survivors]
    return np.array(keep)
The provided code snippet includes necessary dependencies for implementing the `post_processing` function. Write a Python function `def post_processing(outputs, conf_thresh=0.95, nms_thresh=0.4)` to solve the following problem:
Removes detections with lower object confidence score than 'conf_thres' and performs Non-Maximum Suppression to further filter detections. Returns detections with shape: (x, y, w, l, im, re, object_conf, class_score, class_pred)
Here is the function:
def post_processing(outputs, conf_thresh=0.95, nms_thresh=0.4):
    """
    Removes detections with lower object confidence score than 'conf_thresh' and performs
    Non-Maximum Suppression to further filter detections.
    Returns a list with one entry per image: an array of shape
        (num_kept, 9): (x, y, w, l, im, re, object_conf, class_score, class_pred)
    or None when no box survives for that image.
    """
    if type(outputs).__name__ != 'ndarray':
        outputs = outputs.numpy()
    # outputs shape: (batch_size, num_boxes, 6 box params + 1 objectness + num_classes)
    batch_size = outputs.shape[0]
    # box_array: [batch, num, 6] -> (x, y, w, l, im, re)
    box_array = outputs[:, :, :6]
    # Per-class confidence = objectness * class probability.
    confs = outputs[:, :, 6:7] * outputs[:, :, 7:]
    obj_confs = outputs[:, :, 6]  # [batch, num]
    # Best class per box: [batch, num, num_classes] --> [batch, num]
    max_conf = np.max(confs, axis=2)
    max_id = np.argmax(confs, axis=2)
    bboxes_batch = [None for _ in range(batch_size)]
    for i in range(batch_size):
        argwhere = max_conf[i] > conf_thresh
        l_box_array = box_array[i, argwhere, :]
        # BUGFIX: obj_confs is 2-D, so the original `obj_confs[i, argwhere, :]`
        # raised "IndexError: too many indices for array".
        l_obj_confs = obj_confs[i, argwhere]
        l_max_conf = max_conf[i, argwhere]
        l_max_id = max_id[i, argwhere]
        keep = nms_cpu(l_box_array, l_max_conf, nms_thresh=nms_thresh)
        if keep.size > 0:
            l_box_array = l_box_array[keep, :]
            l_obj_confs = l_obj_confs[keep].reshape(-1, 1)
            l_max_conf = l_max_conf[keep].reshape(-1, 1)
            l_max_id = l_max_id[keep].reshape(-1, 1)
            bboxes_batch[i] = np.concatenate((l_box_array, l_obj_confs, l_max_conf, l_max_id), axis=-1)
    return bboxes_batch
6,058 | import os
import torch
import time
def make_folder(folder_name):
    """Create *folder_name* (including parents) if it does not already exist.

    exist_ok=True makes the call idempotent and removes the race between the
    original's os.path.exists check and os.makedirs.
    """
    os.makedirs(folder_name, exist_ok=True)
6,059 | import os
import torch
import time
def time_synchronized():
    """Return time.time() after draining pending CUDA kernels (when a GPU is present),
    so timings measure actual GPU work rather than asynchronous launches."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()
6,060 | import copy
import os
import math
import torch
from torch.optim.lr_scheduler import LambdaLR
import torch.distributed as dist
import matplotlib.pyplot as plt
def plot_lr_scheduler(optimizer, scheduler, num_epochs=300, save_dir=''):
    """Simulate *num_epochs* of scheduler stepping and save the LR curve as
    'LR.png' inside *save_dir*.

    BUGFIX: the original used copy.copy, which is shallow — stepping the copied
    scheduler still mutated the caller's optimizer param_groups. Deep-copying
    the (optimizer, scheduler) pair *together* keeps them isolated while
    preserving the scheduler's internal reference to the copied optimizer.
    """
    optimizer, scheduler = copy.deepcopy((optimizer, scheduler))  # do not modify originals
    y = []
    for _ in range(num_epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, num_epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)
6,061 | from __future__ import division
import sys
import torch
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
from utils.cal_intersection_rotated_boxes import intersection_area, PolyArea2D
def cvt_box_2_polygon(box):
    """
    :param box: an array of shape [num_conners, 2]
    :return: a shapely.geometry.Polygon object
    """
    vertices = [(corner[0], corner[1]) for corner in box]
    # .buffer(0) repairs degenerate (line-like) polygons; see
    # https://stackoverflow.com/questions/13062334/polygon-intersection-error-in-shapely-shapely-geos-topologicalerror-the-opera
    return Polygon(vertices).buffer(0)
def get_corners_vectorize(x, y, w, l, yaw):
    """bev image coordinates format - vectorization

    :param x, y, w, l, yaw: [num_boxes,] tensors
    :return: [num_boxes, 4, 2] tensor of corner (x, y) points, ordered
             front-left, rear-left, rear-right, front-right
    """
    cos_yaw = torch.cos(yaw)
    sin_yaw = torch.sin(yaw)
    # Half-extent offsets along the rotated width/length axes.
    wx, wy = w / 2 * cos_yaw, w / 2 * sin_yaw
    lx, ly = l / 2 * sin_yaw, l / 2 * cos_yaw
    corners = torch.zeros((x.size(0), 4, 2), device=x.device, dtype=torch.float)
    # front left
    corners[:, 0, 0] = x - wx - lx
    corners[:, 0, 1] = y - wy + ly
    # rear left
    corners[:, 1, 0] = x - wx + lx
    corners[:, 1, 1] = y - wy - ly
    # rear right
    corners[:, 2, 0] = x + wx + lx
    corners[:, 2, 1] = y + wy - ly
    # front right
    corners[:, 3, 0] = x + wx - lx
    corners[:, 3, 1] = y + wy + ly
    return corners
The provided code snippet includes necessary dependencies for implementing the `get_polygons_areas_fix_xy` function. Write a Python function `def get_polygons_areas_fix_xy(boxes, fix_xy=100.)` to solve the following problem:
Args: boxes: (num_boxes, 4) --> w, l, im, re
Here is the function:
def get_polygons_areas_fix_xy(boxes, fix_xy=100.):
    """
    Args:
        boxes: (num_boxes, 4) --> w, l, im, re
    Returns:
        (list of shapely polygons, [num_boxes] tensor of areas)
    """
    n_boxes = boxes.size(0)
    w, l, im, re = boxes.t()
    yaw = torch.atan2(im, re)
    # Anchor every box at the same (fix_xy, fix_xy) centre so only shape and
    # orientation matter when the polygons are later compared.
    fixed = torch.full(size=(n_boxes,), fill_value=fix_xy, device=boxes.device, dtype=torch.float)
    corners = get_corners_vectorize(fixed, fixed, w, l, yaw)
    polygons = [cvt_box_2_polygon(c) for c in corners]
    areas = w * l
    return polygons, areas
6,062 | from __future__ import division
import sys
import torch
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
from utils.cal_intersection_rotated_boxes import intersection_area, PolyArea2D
def iou_rotated_boxes_targets_vs_anchors(anchors_polygons, anchors_areas, targets_polygons, targets_areas):
    """Pairwise rotated-box IoU between every anchor and every target polygon.

    Returns a (num_anchors, num_targets) float tensor of IoUs.
    """
    ious = torch.zeros(size=(len(anchors_areas), len(targets_areas)),
                       device=anchors_areas.device, dtype=torch.float)
    for a_idx, (a_poly, a_area) in enumerate(zip(anchors_polygons, anchors_areas)):
        for t_idx, (t_poly, t_area) in enumerate(zip(targets_polygons, targets_areas)):
            inter = a_poly.intersection(t_poly).area
            # Small epsilon guards against division by zero for empty unions.
            ious[a_idx, t_idx] = inter / (a_area + t_area - inter + 1e-16)
    return ious
6,063 | from __future__ import division
import sys
import torch
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
from utils.cal_intersection_rotated_boxes import intersection_area, PolyArea2D
def cvt_box_2_polygon(box):
def get_corners_vectorize(x, y, w, l, yaw):
def intersection_area(rect1, rect2):
def PolyArea2D(pts):
def iou_pred_vs_target_boxes(pred_boxes, target_boxes, GIoU=False, DIoU=False, CIoU=False):
    """Element-wise rotated-box IoU between matched prediction/target pairs.

    :param pred_boxes: (n, 6) tensor of (x, y, w, l, im, re)
    :param target_boxes: (n, 6) tensor, same layout, paired row-for-row
    :param GIoU: accumulate the Generalized-IoU loss instead of plain 1 - IoU
    :param DIoU, CIoU: not implemented; passing True raises NotImplementedError
    :return: ((n,) tensor of IoUs, scalar-shaped accumulated (G)IoU loss)
    """
    assert pred_boxes.size() == target_boxes.size(), "Unmatch size of pred_boxes and target_boxes"
    device = pred_boxes.device
    n_boxes = pred_boxes.size(0)
    # Recover yaw from its (im, re) = (sin, cos)-style encoding.
    t_x, t_y, t_w, t_l, t_im, t_re = target_boxes.t()
    t_yaw = torch.atan2(t_im, t_re)
    t_conners = get_corners_vectorize(t_x, t_y, t_w, t_l, t_yaw)
    t_areas = t_w * t_l
    p_x, p_y, p_w, p_l, p_im, p_re = pred_boxes.t()
    p_yaw = torch.atan2(p_im, p_re)
    p_conners = get_corners_vectorize(p_x, p_y, p_w, p_l, p_yaw)
    p_areas = p_w * p_l
    ious = []
    giou_loss = torch.tensor([0.], device=device, dtype=torch.float)
    # Thinking to apply vectorization this step
    for box_idx in range(n_boxes):
        p_cons, t_cons = p_conners[box_idx], t_conners[box_idx]
        if not GIoU:
            # Plain IoU path: exact polygon intersection via shapely.
            p_poly, t_poly = cvt_box_2_polygon(p_cons), cvt_box_2_polygon(t_cons)
            intersection = p_poly.intersection(t_poly).area
        else:
            # GIoU path keeps everything as tensors for the area computation.
            intersection = intersection_area(p_cons, t_cons)
        p_area, t_area = p_areas[box_idx], t_areas[box_idx]
        union = p_area + t_area - intersection
        iou = intersection / (union + 1e-16)
        if GIoU:
            # Smallest convex shape enclosing both boxes (GIoU penalty term).
            convex_conners = torch.cat((p_cons, t_cons), dim=0)
            hull = ConvexHull(convex_conners.clone().detach().cpu().numpy())  # done on cpu, just need indices output
            convex_conners = convex_conners[hull.vertices]
            convex_area = PolyArea2D(convex_conners)
            giou_loss += 1. - (iou - (convex_area - union) / (convex_area + 1e-16))
        else:
            giou_loss += 1. - iou
        if DIoU or CIoU:
            raise NotImplementedError
        ious.append(iou)
    return torch.tensor(ious, device=device, dtype=torch.float), giou_loss
6,064 | from __future__ import division
import sys
import torch
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
from utils.cal_intersection_rotated_boxes import intersection_area, PolyArea2D
def get_corners_torch(x, y, w, l, yaw):
    """Return the 4 BEV corner (x, y) points of one rotated box as a (4, 2)
    float tensor, ordered front-left, rear-left, rear-right, front-right."""
    cos_yaw = torch.cos(yaw)
    sin_yaw = torch.sin(yaw)
    # Half-extent offsets along the rotated width/length axes.
    wx, wy = w / 2 * cos_yaw, w / 2 * sin_yaw
    lx, ly = l / 2 * sin_yaw, l / 2 * cos_yaw
    corners = torch.zeros((4, 2), dtype=torch.float, device=x.device)
    corners[0, 0], corners[0, 1] = x - wx - lx, y - wy + ly  # front left
    corners[1, 0], corners[1, 1] = x - wx + lx, y - wy - ly  # rear left
    corners[2, 0], corners[2, 1] = x + wx + lx, y + wy - ly  # rear right
    corners[3, 0], corners[3, 1] = x + wx - lx, y + wy + ly  # front right
    return corners
6,065 | import sys
import math
import numpy as np
import mayavi.mlab as mlab
import cv2
from data_process import kitti_data_utils, kitti_bev_utils, transformation
import config.kitti_config as cnf
The provided code snippet includes necessary dependencies for implementing the `draw_lidar_simple` function. Write a Python function `def draw_lidar_simple(pc, color=None)` to solve the following problem:
Draw lidar points. simplest set up.
Here is the function:
def draw_lidar_simple(pc, color=None):
    ''' Draw lidar points. simplest set up.

    :param pc: (n, 3+) array of XYZ lidar points
    :param color: per-point scalar for the colormap; defaults to height (z)
    :return: the mayavi figure holding the rendered scene
    '''
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1600, 1000))
    if color is None: color = pc[:, 2]
    # draw points
    mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color, color=None, mode='point', colormap='gnuplot', scale_factor=1,
                  figure=fig)
    # draw origin
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
    # draw axis (x red, y green, z blue)
    axes = np.array([
        [2., 0., 0., 0.],
        [0., 2., 0., 0.],
        [0., 0., 2., 0.],
    ], dtype=np.float64)
    mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)
    mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)
    mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)
    # Fixed camera pose looking down the +x (forward) direction.
    mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=62.0, figure=fig)
    return fig
6,066 | import sys
import math
import numpy as np
import mayavi.mlab as mlab
import cv2
from data_process import kitti_data_utils, kitti_bev_utils, transformation
import config.kitti_config as cnf
The provided code snippet includes necessary dependencies for implementing the `show_image_with_boxes` function. Write a Python function `def show_image_with_boxes(img, objects, calib, show3d=False)` to solve the following problem:
Show image with 2D bounding boxes
Here is the function:
def show_image_with_boxes(img, objects, calib, show3d=False):
    ''' Show image with 2D bounding boxes.

    Draws each object's projected 3D box on a copy of *img* (the caller's
    image is left untouched) and optionally displays it via cv2.
    '''
    annotated = np.copy(img)
    for obj in objects:
        if obj.type == 'DontCare':
            continue
        box3d_pts_2d, _ = kitti_data_utils.compute_box_3d(obj, calib.P)
        if box3d_pts_2d is not None:
            annotated = kitti_data_utils.draw_projected_box3d(annotated, box3d_pts_2d, cnf.colors[obj.cls_id])
    if show3d:
        cv2.imshow("img", annotated)
    return annotated
6,067 | import sys
import math
import numpy as np
import mayavi.mlab as mlab
import cv2
from data_process import kitti_data_utils, kitti_bev_utils, transformation
import config.kitti_config as cnf
def draw_lidar(pc, color=None, fig1=None, bgcolor=(0, 0, 0), pts_scale=1, pts_mode='point', pts_color=None):
    ''' Draw lidar points
    Args:
        pc: numpy array (n,3) of XYZ
        color: numpy array (n) of intensity or whatever
        fig: mayavi figure handler, if None create new one otherwise will use it
    Returns:
        fig: created or used fig
    '''
    # NOTE(review): figure creation is commented out, so mlab.clf(figure=None)
    # clears the *current* mayavi figure regardless of fig1 — confirm intended.
    # if fig1 is None: fig1 = mlab.figure(figure="point cloud", bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000))
    mlab.clf(figure=None)
    if color is None: color = pc[:, 2]
    mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color, color=pts_color, mode=pts_mode, colormap='gnuplot',
                  scale_factor=pts_scale, figure=fig1)
    # draw origin
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
    # draw axis (x red, y green, z blue)
    axes = np.array([
        [2., 0., 0., 0.],
        [0., 2., 0., 0.],
        [0., 0., 2., 0.],
    ], dtype=np.float64)
    mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig1)
    mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig1)
    mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig1)
    # draw fov (todo: update to real sensor spec.)
    fov = np.array([  # 45 degree
        [20., 20., 0., 0.],
        [20., -20., 0., 0.],
    ], dtype=np.float64)
    mlab.plot3d([0, fov[0, 0]], [0, fov[0, 1]], [0, fov[0, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,
                figure=fig1)
    mlab.plot3d([0, fov[1, 0]], [0, fov[1, 1]], [0, fov[1, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,
                figure=fig1)
    # draw square region of interest on the ground plane
    TOP_Y_MIN = -20
    TOP_Y_MAX = 20
    TOP_X_MIN = 0
    TOP_X_MAX = 40
    TOP_Z_MIN = -2.0
    TOP_Z_MAX = 0.4
    x1 = TOP_X_MIN
    x2 = TOP_X_MAX
    y1 = TOP_Y_MIN
    y2 = TOP_Y_MAX
    mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig1)
    mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig1)
    mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig1)
    mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig1)
    # mlab.orientation_axes()
    mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=60.0, figure=fig1)
    return fig1
def draw_gt_boxes3d(gt_boxes3d, fig, color=(1, 1, 1), line_width=2, draw_text=True, text_scale=(1, 1, 1),
                    color_list=None):
    ''' Draw 3D bounding boxes
    Args:
        gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners
        fig: mayavi figure handler
        color: RGB value tuple in range (0,1), box line color
        line_width: box line width
        draw_text: boolean, if true, write box indices beside boxes
        text_scale: three number tuple
        color_list: a list of RGB tuple, if not None, overwrite color.
    Returns:
        fig: updated fig
    '''
    num = len(gt_boxes3d)
    for n in range(num):
        b = gt_boxes3d[n]
        if color_list is not None:
            color = color_list[n]
        # Label the box index near corner 4 (first corner of the second face).
        if draw_text: mlab.text3d(b[4, 0], b[4, 1], b[4, 2], '%d' % n, scale=text_scale, color=color, figure=fig)
        # Draw the 12 box edges: bottom face, top face, and the 4 vertical edges.
        for k in range(0, 4):
            # http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=None,
                        line_width=line_width, figure=fig)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=None,
                        line_width=line_width, figure=fig)
            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=None,
                        line_width=line_width, figure=fig)
    # mlab.show(1)
    # mlab.view(azimuth=180, elevation=70, focalpoint=[ 12.0909996 , -1.04700089, -2.03249991], distance=62.0, figure=fig)
    return fig
def get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax,
                           return_more=False, clip_distance=0.0):
    ''' Filter lidar points, keep those in image FOV '''
    pts_2d = calib.project_velo_to_image(pc_velo)
    inside_x = (pts_2d[:, 0] >= xmin) & (pts_2d[:, 0] < xmax)
    inside_y = (pts_2d[:, 1] >= ymin) & (pts_2d[:, 1] < ymax)
    # Also drop points closer to the sensor than clip_distance (along velo x).
    fov_inds = inside_x & inside_y & (pc_velo[:, 0] > clip_distance)
    imgfov_pc_velo = pc_velo[fov_inds, :]
    if return_more:
        return imgfov_pc_velo, pts_2d, fov_inds
    return imgfov_pc_velo
The provided code snippet includes necessary dependencies for implementing the `show_lidar_with_boxes` function. Write a Python function `def show_lidar_with_boxes(pc_velo, objects, calib, img_fov=False, img_width=None, img_height=None, fig=None)` to solve the following problem:
Show all LiDAR points. Draw 3d box in LiDAR point cloud (in velo coord system)
Here is the function:
def show_lidar_with_boxes(pc_velo, objects, calib,
                          img_fov=False, img_width=None, img_height=None, fig=None):
    ''' Show all LiDAR points.
    Draw 3d box in LiDAR point cloud (in velo coord system)

    :param pc_velo: (n, 3+) lidar points in velodyne coordinates
    :param objects: KITTI label objects; 'DontCare' entries are skipped
    :param calib: calibration providing P and rect->velo projection
    :param img_fov: when True, keep only points inside the image frustum
    :param img_width, img_height: image size used for the FOV filter
    :param fig: existing mayavi figure to draw into; created when None
    '''
    if not fig:
        fig = mlab.figure(figure="KITTI_POINT_CLOUD", bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1250, 550))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0, img_width, img_height)
    draw_lidar(pc_velo, fig1=fig)
    for obj in objects:
        if obj.type == 'DontCare': continue
        # Draw 3d bounding box (computed in rect camera frame, moved to velo)
        box3d_pts_2d, box3d_pts_3d = kitti_data_utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = kitti_data_utils.compute_orientation_3d(obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=(0, 1, 1), line_width=2, draw_text=False)
        mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5), tube_radius=None, line_width=1, figure=fig)
    mlab.view(distance=90)
6,068 | import sys
import math
import numpy as np
import mayavi.mlab as mlab
import cv2
from data_process import kitti_data_utils, kitti_bev_utils, transformation
import config.kitti_config as cnf
def merge_rgb_to_bev(img_rgb, img_bev, output_width):
    """Resize both images to *output_width* (keeping aspect ratio) and stack
    them vertically: RGB on top, BEV below. Returns a uint8 HxWx3 array."""
    def _resize_to_width(img):
        # Scale height by the same ratio used for the width.
        h, w = img.shape[:2]
        new_h = int(output_width / w * h)
        return cv2.resize(img, (output_width, new_h)), new_h

    resized_rgb, rgb_h = _resize_to_width(img_rgb)
    resized_bev, bev_h = _resize_to_width(img_bev)
    out_img = np.zeros((rgb_h + bev_h, output_width, 3), dtype=np.uint8)
    out_img[:rgb_h, ...] = resized_rgb  # upper: RGB
    out_img[rgb_h:, ...] = resized_bev  # lower: BEV
    return out_img
6,069 | import sys
import math
import numpy as np
import mayavi.mlab as mlab
import cv2
from data_process import kitti_data_utils, kitti_bev_utils, transformation
import config.kitti_config as cnf
def invert_target(targets, calib, img_shape_2d, RGB_Map=None):
    """Convert YOLO-format BEV targets back into KITTI `Object3d` instances.

    :param targets: (n, 7) array of (cls, x, y, w, l, im, re) in BEV units
    :param calib: calibration providing V2C / R0 / P matrices
    :param img_shape_2d: (height, width) of the RGB image, used to clip 2D boxes
    :param RGB_Map: optional BEV map; when given, the boxes are also drawn on it
    :return: list of kitti_data_utils.Object3d
    """
    predictions = kitti_bev_utils.inverse_yolo_target(targets, cnf.boundary)
    if predictions.shape[0]:
        predictions[:, 1:] = transformation.lidar_to_camera_box(predictions[:, 1:], calib.V2C, calib.R0, calib.P)
    objects_new = []
    corners3d = []
    for l in predictions:
        # Map class id -> KITTI type name (do not shadow the builtin `str`).
        if l[0] == 0:
            obj_type = "Car"
        elif l[0] == 1:
            obj_type = "Pedestrian"
        elif l[0] == 2:
            obj_type = "Cyclist"
        else:
            obj_type = "Ignore"
        line = '%s -1 -1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0' % obj_type
        obj = kitti_data_utils.Object3d(line)
        obj.t = l[1:4]
        obj.h, obj.w, obj.l = l[4:7]
        obj.ry = np.arctan2(math.sin(l[7]), math.cos(l[7]))  # wrap yaw into (-pi, pi]
        _, corners_3d = kitti_data_utils.compute_box_3d(obj, calib.P)
        corners3d.append(corners_3d)
        objects_new.append(obj)
    if len(corners3d) > 0:
        corners3d = np.array(corners3d)
        img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
        # Clip the projected 2D boxes to the image bounds.
        img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape_2d[1] - 1)
        img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape_2d[0] - 1)
        img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape_2d[1] - 1)
        img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape_2d[0] - 1)
    for i, obj in enumerate(objects_new):
        x, z, ry = obj.t[0], obj.t[2], obj.ry
        beta = np.arctan2(z, x)
        alpha = -np.sign(beta) * np.pi / 2 + beta + ry  # observation angle
        obj.alpha = alpha
        obj.box2d = img_boxes[i, :]
    if RGB_Map is not None:
        labels, noObjectLabels = kitti_bev_utils.read_labels_for_bevbox(objects_new)
        if not noObjectLabels:
            labels[:, 1:] = transformation.camera_to_lidar_box(labels[:, 1:], calib.V2C, calib.R0,
                                                               calib.P)  # convert rect cam to velo cord
        target = kitti_bev_utils.build_yolo_target(labels)
        kitti_bev_utils.draw_box_in_bev(RGB_Map, target)
    return objects_new
6,070 | import sys
import math
import numpy as np
import mayavi.mlab as mlab
import cv2
from data_process import kitti_data_utils, kitti_bev_utils, transformation
import config.kitti_config as cnf
def predictions_to_kitti_format(img_detections, calib, img_shape_2d, img_size, RGB_Map=None):
    """Convert network detections (BEV, normalized by *img_size*) into KITTI
    `Object3d` instances in the camera frame.

    :param img_detections: iterable of per-image detection arrays (or None)
    :param calib: calibration providing V2C / R0 / P matrices
    :param img_shape_2d: (height, width) of the RGB image, used to clip 2D boxes
    :param img_size: BEV input size used to normalize box coordinates
    :param RGB_Map: optional BEV map; when given, the boxes are also drawn on it
    :return: list of kitti_data_utils.Object3d
    """
    predictions = []
    for detections in img_detections:
        if detections is None:
            continue
        # Rescale boxes to original image
        for x, y, w, l, im, re, *_, cls_pred in detections:
            predictions.append([cls_pred, x / img_size, y / img_size, w / img_size, l / img_size, im, re])
    predictions = kitti_bev_utils.inverse_yolo_target(np.array(predictions), cnf.boundary)
    if predictions.shape[0]:
        predictions[:, 1:] = transformation.lidar_to_camera_box(predictions[:, 1:], calib.V2C, calib.R0, calib.P)
    objects_new = []
    corners3d = []
    for l in predictions:
        # Map class id -> KITTI type name (do not shadow the builtin `str`).
        if l[0] == 0:
            obj_type = "Car"
        elif l[0] == 1:
            obj_type = "Pedestrian"
        elif l[0] == 2:
            obj_type = "Cyclist"
        else:
            obj_type = "Ignore"
        line = '%s -1 -1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0' % obj_type
        obj = kitti_data_utils.Object3d(line)
        obj.t = l[1:4]
        obj.h, obj.w, obj.l = l[4:7]
        obj.ry = np.arctan2(math.sin(l[7]), math.cos(l[7]))  # wrap yaw into (-pi, pi]
        _, corners_3d = kitti_data_utils.compute_box_3d(obj, calib.P)
        corners3d.append(corners_3d)
        objects_new.append(obj)
    if len(corners3d) > 0:
        corners3d = np.array(corners3d)
        img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
        # Clip the projected 2D boxes to the image bounds.
        img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape_2d[1] - 1)
        img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape_2d[0] - 1)
        img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape_2d[1] - 1)
        img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape_2d[0] - 1)
    for i, obj in enumerate(objects_new):
        x, z, ry = obj.t[0], obj.t[2], obj.ry
        beta = np.arctan2(z, x)
        alpha = -np.sign(beta) * np.pi / 2 + beta + ry  # observation angle
        obj.alpha = alpha
        obj.box2d = img_boxes[i, :]
    if RGB_Map is not None:
        labels, noObjectLabels = kitti_bev_utils.read_labels_for_bevbox(objects_new)
        if not noObjectLabels:
            labels[:, 1:] = transformation.camera_to_lidar_box(labels[:, 1:], calib.V2C, calib.R0,
                                                               calib.P)  # convert rect cam to velo cord
        target = kitti_bev_utils.build_yolo_target(labels)
        kitti_bev_utils.draw_box_in_bev(RGB_Map, target)
    return objects_new
6,071 | import torch
def get_corners_torch(x, y, w, l, yaw):
    """Compute the 4 BEV corners of a single rotated box.

    Returns a (4, 2) float tensor on x's device with rows ordered
    front-left, rear-left, rear-right, front-right.
    """
    cos_yaw, sin_yaw = torch.cos(yaw), torch.sin(yaw)
    half_w = w / 2
    half_l = l / 2
    bev_corners = torch.zeros((4, 2), dtype=torch.float, device=x.device)
    # front left
    bev_corners[0, 0] = x - half_w * cos_yaw - half_l * sin_yaw
    bev_corners[0, 1] = y - half_w * sin_yaw + half_l * cos_yaw
    # rear left
    bev_corners[1, 0] = x - half_w * cos_yaw + half_l * sin_yaw
    bev_corners[1, 1] = y - half_w * sin_yaw - half_l * cos_yaw
    # rear right
    bev_corners[2, 0] = x + half_w * cos_yaw + half_l * sin_yaw
    bev_corners[2, 1] = y + half_w * sin_yaw - half_l * cos_yaw
    # front right
    bev_corners[3, 0] = x + half_w * cos_yaw - half_l * sin_yaw
    bev_corners[3, 1] = y + half_w * sin_yaw + half_l * cos_yaw
    return bev_corners
6,072 | import math
import jittor as jt
from jittor import nn
def get_freq_indices(method):
    """Return the first-k 2D DCT frequency (x, y) index lists for *method*.

    *method* is '<top|low|bot><k>' with k in {1, 2, 4, 8, 16, 32}; 'top' picks
    the empirically best frequencies, 'low' the lowest, 'bot' the worst.
    Raises AssertionError for any other value.
    """
    assert method in ['top1', 'top2', 'top4', 'top8', 'top16', 'top32',
                      'bot1', 'bot2', 'bot4', 'bot8', 'bot16', 'bot32',
                      'low1', 'low2', 'low4', 'low8', 'low16', 'low32']
    num_freq = int(method[3:])
    # Full 32-entry tables, one (x, y) pair of lists per selection strategy.
    tables = {
        'top': ([0, 0, 6, 0, 0, 1, 1, 4, 5, 1, 3, 0, 0,
                 0, 3, 2, 4, 6, 3, 5, 5, 2, 6, 5, 5, 3, 3, 4, 2, 2, 6, 1],
                [0, 1, 0, 5, 2, 0, 2, 0, 0, 6, 0, 4, 6,
                 3, 5, 2, 6, 3, 3, 3, 5, 1, 1, 2, 4, 2, 1, 1, 3, 0, 5, 3]),
        'low': ([0, 0, 1, 1, 0, 2, 2, 1, 2, 0, 3, 4, 0,
                 1, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4],
                [0, 1, 0, 1, 2, 0, 1, 2, 2, 3, 0, 0, 4,
                 3, 1, 5, 4, 3, 2, 1, 0, 6, 5, 4, 3, 2, 1, 0, 6, 5, 4, 3]),
        'bot': ([6, 1, 3, 3, 2, 4, 1, 2, 4, 4, 5, 1, 4,
                 6, 2, 5, 6, 1, 6, 2, 2, 4, 3, 3, 5, 5, 6, 2, 5, 5, 3, 6],
                [6, 4, 4, 6, 6, 3, 1, 4, 4, 5, 6, 5, 2,
                 2, 5, 1, 4, 3, 5, 0, 3, 1, 1, 2, 4, 2, 1, 1, 5, 3, 3, 3]),
    }
    prefix = method[:3]
    if prefix not in tables:
        raise NotImplementedError
    all_x, all_y = tables[prefix]
    return all_x[:num_freq], all_y[:num_freq]
6,073 | import jittor as jt
from jittor import nn
import numpy as np
def affine_grid_generator(height, width, theta):
    """Build a sampling grid by applying the batch of 2x3 affine transforms
    *theta* (num_batch, 2, 3) to a normalized [-1, 1] x [-1, 1] mesh grid.

    :return: (num_batch, 2, height, width) tensor of transformed coordinates
    """
    num_batch = theta.shape[0]
    # create normalized 2D grid
    x = jt.linspace(-1.0, 1.0, width)
    y = jt.linspace(-1.0, 1.0, height)
    x_t, y_t = jt.meshgrid(x, y)
    # flatten and reshape to homogeneous form [x_t, y_t, 1]
    x_t_flat = x_t.reshape(-1)
    y_t_flat = y_t.reshape(-1)
    # (removed a leftover debug print of x_t.shape)
    ones = jt.ones_like(x_t_flat)
    sampling_grid = jt.stack([x_t_flat, y_t_flat, ones])
    # repeat grid num_batch times: (num_batch, 3, H*W)
    sampling_grid = sampling_grid.unsqueeze(0).expand(num_batch, -1, -1)
    # transform the sampling grid - batch multiply
    batch_grids = jt.matmul(theta, sampling_grid)
    # reshape to (num_batch, 2, H, W)
    batch_grids = batch_grids.reshape(num_batch, 2, height, width)
    return batch_grids
6,074 | import jittor as jt
from jittor import nn
import numpy as np
def get_pixel_value(img, x, y):
    """Gather values from *img* at per-pixel integer indices:
    out[b, c, i, j] = img[b, c, x[b, i, j], y[b, i, j]].

    :param img: (B, C, H, W) jittor Var
    :param x, y: (B, H, W) integer index maps

    NOTE(review): per the reindex pattern, `x` indexes the H axis and `y` the
    W axis — confirm this matches the caller's coordinate convention.
    """
    B, C, H, W = img.shape
    return img.reindex([B, C, H, W], ['i0', 'i1', '@e0(i0, i2, i3)','@e1(i0, i2, i3)'], extras=[x, y])
def bilinear_sampler(img, x, y):
    """Bilinearly sample *img* at the (possibly fractional) coordinates x, y.

    :param img: (B, C, H, W) jittor Var
    :param x, y: coordinate maps in normalized [-1, 1] range
    :return: sampled values, same layout as get_pixel_value's output
    """
    B, C, H ,W = img.shape
    max_y = H - 1
    max_x = W - 1
    # rescale x and y to [0, W-1/H-1]
    # NOTE(review): scaling by (max_x - 1)/(max_y - 1) maps [-1, 1] onto
    # [0, W-2]/[0, H-2] rather than the usual [0, W-1]/[0, H-1] — confirm
    # whether this off-by-one is intended.
    x = 0.5 * (x + 1.0) * (max_x-1)
    y = 0.5 * (y + 1.0) * (max_y-1)
    # grab 4 nearest corner points for each (x_i, y_i)
    x0 = jt.floor(x).astype('int32')
    x1 = x0 + 1
    y0 = jt.floor(y).astype('int32')
    y1 = y0 + 1
    # clamp corner indices inside the image
    x0 = jt.minimum(jt.maximum(0, x0), max_x)
    x1 = jt.minimum(jt.maximum(0, x1), max_x)
    y0 = jt.minimum(jt.maximum(0, y0), max_y)
    y1 = jt.minimum(jt.maximum(0, y1), max_y)
    # get pixel value at corner coords
    Ia = get_pixel_value(img, x0, y0)
    Ib = get_pixel_value(img, x0, y1)
    Ic = get_pixel_value(img, x1, y0)
    Id = get_pixel_value(img, x1, y1)
    # calculate deltas (interpolation weights; they sum to 1)
    wa = (x1-x) * (y1-y)
    wb = (x1-x) * (y-y0)
    wc = (x-x0) * (y1-y)
    wd = (x-x0) * (y-y0)
    # compute output
    out = wa*Ia + wb*Ib + wc*Ic + wd*Id
    return out
6,075 | import jittor as jt
from jittor import nn
from contextlib import contextmanager
@contextmanager
def null_context():
    """No-op context manager: `with null_context(): ...` does nothing extra.

    BUGFIX: restored the @contextmanager decorator (contextmanager is imported
    at the top of the file but was unused); without it the call returned a
    bare generator that cannot be used in a `with` statement.
    """
    yield
6,076 | import jittor as jt
from jittor import nn
def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    """Round *v* to the nearest multiple of *divisor*, never below *min_value*
    (defaults to *divisor*). If rounding would shrink the result below
    round_limit * v, bump it up by one extra divisor."""
    floor = min_value or divisor
    rounded = int(v + divisor / 2) // divisor * divisor
    result = max(floor, rounded)
    # Make sure that round down does not go down by more than 10%.
    if result < round_limit * v:
        result += divisor
    return result
6,077 | import functools
import inspect
import opcode
import os
import sys
import re
import collections
import datetime as datetime_module
import itertools
import threading
import traceback
from .variables import CommonVariable, Exploding, BaseVariable
from . import utils, pycompat
import collections
del collections, __VersionInfo
def get_local_reprs(frame, watch=(), custom_repr=(), max_length=None, normalize=False):
    """Map each local variable of *frame* to a shortened repr, ordered by the
    variable's position in the code object (args first, then cells/frees,
    then anything else in f_locals), with watched expressions appended."""
    code = frame.f_code
    vars_order = (code.co_varnames + code.co_cellvars + code.co_freevars +
                  tuple(frame.f_locals.keys()))
    repr_pairs = (
        (name, utils.get_shortish_repr(value, custom_repr, max_length, normalize))
        for name, value in frame.f_locals.items()
    )
    result = collections.OrderedDict(
        sorted(repr_pairs, key=lambda pair: vars_order.index(pair[0]))
    )
    for variable in watch:
        result.update(sorted(variable.items(frame, normalize)))
    return result
6,078 | import functools
import inspect
import opcode
import os
import sys
import re
import collections
import datetime as datetime_module
import itertools
import threading
import traceback
from .variables import CommonVariable, Exploding, BaseVariable
from . import utils, pycompat
if pycompat.PY2:
from io import open
# Matches IPython cell pseudo-filenames such as '<ipython-input-42-abc123>'.
ipython_filename_pattern = re.compile('^<ipython-input-([0-9]+)-.*>$')
# Matches module paths embedded inside an Ansible payload zip archive.
ansible_filename_pattern = re.compile(r'^(.+\.zip)[/|\\](ansible[/|\\]modules[/|\\].+\.py)$')
class UnavailableSource(object):
    """Sequence stand-in used when a frame's source code cannot be located;
    every requested line resolves to the same placeholder text."""

    def __getitem__(self, i):
        return u'SOURCE IS UNAVAILABLE'
# Cache for get_path_and_source_from_frame:
# (module_name, file_name) -> (file_name, source_lines).
source_and_path_cache = {}
def get_path_and_source_from_frame(frame):
    """Return (file_name, source_lines) for *frame*, trying, in order: the
    module loader, IPython history, an Ansible payload zip, and finally the
    file on disk. Falls back to an UnavailableSource placeholder. Results are
    memoized in source_and_path_cache keyed by (module name, file name)."""
    globs = frame.f_globals or {}
    module_name = globs.get('__name__')
    file_name = frame.f_code.co_filename
    cache_key = (module_name, file_name)
    try:
        return source_and_path_cache[cache_key]
    except KeyError:
        pass
    loader = globs.get('__loader__')
    source = None
    # 1) Ask the module loader (PEP 302) for the source.
    if hasattr(loader, 'get_source'):
        try:
            source = loader.get_source(module_name)
        except ImportError:
            pass
        if source is not None:
            source = source.splitlines()
    if source is None:
        ipython_filename_match = ipython_filename_pattern.match(file_name)
        ansible_filename_match = ansible_filename_pattern.match(file_name)
        if ipython_filename_match:
            # 2) IPython cell: recover the cell text from the shell history.
            entry_number = int(ipython_filename_match.group(1))
            try:
                import IPython
                ipython_shell = IPython.get_ipython()
                ((_, _, source_chunk),) = ipython_shell.history_manager. \
                    get_range(0, entry_number, entry_number + 1)
                source = source_chunk.splitlines()
            except Exception:
                pass
        elif ansible_filename_match:
            # 3) Ansible: the module source lives inside a payload zip.
            try:
                import zipfile
                archive_file = zipfile.ZipFile(ansible_filename_match.group(1), 'r')
                source = archive_file.read(ansible_filename_match.group(2).replace('\\', '/')).splitlines()
            except Exception:
                pass
        else:
            # 4) Plain file on disk; read as bytes, decode below.
            try:
                with open(file_name, 'rb') as fp:
                    source = fp.read().splitlines()
            except utils.file_reading_errors:
                pass
    if not source:
        # We used to check `if source is None` but I found a rare bug where it
        # was empty, but not `None`, so now we check `if not source`.
        source = UnavailableSource()
    # If we just read the source from a file, or if the loader did not
    # apply tokenize.detect_encoding to decode the source into a
    # string, then we should do that ourselves.
    if isinstance(source[0], bytes):
        encoding = 'utf-8'
        for line in source[:2]:
            # File coding may be specified. Match pattern from PEP-263
            # (https://www.python.org/dev/peps/pep-0263/)
            match = re.search(br'coding[:=]\s*([-\w.]+)', line)
            if match:
                encoding = match.group(1).decode('ascii')
                break
        source = [pycompat.text_type(sline, encoding, 'replace') for sline in
                  source]
    result = (file_name, source)
    source_and_path_cache[cache_key] = result
    return result
6,079 | import functools
import inspect
import opcode
import os
import sys
import re
import collections
import datetime as datetime_module
import itertools
import threading
import traceback
from .variables import CommonVariable, Exploding, BaseVariable
from . import utils, pycompat
if pycompat.PY2:
from io import open
class FileWriter(object):
def __init__(self, path, overwrite):
def write(self, s):
def get_write_function(output, overwrite):
is_path = isinstance(output, (pycompat.PathLike, str))
if overwrite and not is_path:
raise Exception('`overwrite=True` can only be used when writing '
'content to file.')
if output is None:
def write(s):
stderr = sys.stderr
try:
stderr.write(s)
except UnicodeEncodeError:
# God damn Python 2
stderr.write(utils.shitcode(s))
elif is_path:
return FileWriter(output, overwrite).write
elif callable(output):
write = output
else:
assert isinstance(output, utils.WritableStream)
def write(s):
output.write(s)
return write | null |
6,080 | import abc
import re
import sys
from .pycompat import ABC, string_types, collections_abc
def _check_methods(C, *methods):
    """Return True when every named method is concretely defined somewhere
    in C's MRO; NotImplemented when one is missing or explicitly disabled
    (set to None, the convention for "unsupported").
    """
    for method in methods:
        for klass in C.__mro__:
            if method not in klass.__dict__:
                continue
            # An explicit None entry means "deliberately unsupported".
            if klass.__dict__[method] is None:
                return NotImplemented
            break
        else:
            # No class in the MRO defines this method at all.
            return NotImplemented
    return True
6,081 | import abc
import re
import sys
from .pycompat import ABC, string_types, collections_abc
def ensure_tuple(x):
    """Coerce *x* into a tuple: a non-string iterable is materialized
    element-by-element, anything else (including strings) becomes a
    one-element tuple."""
    is_nonstring_iterable = (isinstance(x, collections_abc.Iterable)
                             and not isinstance(x, string_types))
    return tuple(x) if is_nonstring_iterable else (x,)
6,082 | import itertools
import abc
from copy import deepcopy
from . import utils
from . import pycompat
def needs_parentheses(source):
    """Return True when the expression *source* must be parenthesized
    before an attribute access, i.e. when `source.x` compiles to different
    bytecode than `(source).x`."""
    def bytecode_of(expression):
        return compile(expression, '<variable>', 'eval').co_code

    bare = bytecode_of('{}.x'.format(source))
    wrapped = bytecode_of('({}).x'.format(source))
    return bare != wrapped
6,083 | import abc
import os
import inspect
import sys
import datetime as datetime_module
if sys.version_info[:2] >= (3, 6):
    # Python 3.6+ supports the `timespec` argument natively.
    time_isoformat = datetime_module.time.isoformat
else:
    def time_isoformat(time, timespec='microseconds'):
        """Backport of ``time.isoformat(timespec=...)``; only the
        'microseconds' precision is supported."""
        assert isinstance(time, datetime_module.time)
        if timespec != 'microseconds':
            raise NotImplementedError
        formatted = '{:02d}:{:02d}:{:02d}.{:06d}'.format(
            time.hour, time.minute, time.second, time.microsecond
        )
        assert len(formatted) == 15
        return formatted


def timedelta_format(timedelta):
    """Render a timedelta as the fixed-width string 'HH:MM:SS.ffffff'."""
    as_time = (datetime_module.datetime.min + timedelta).time()
    return time_isoformat(as_time, timespec='microseconds')
6,084 | import abc
import os
import inspect
import sys
import datetime as datetime_module
def timedelta_parse(s):
    """Inverse of ``timedelta_format``: parse 'HH:MM:SS.ffffff' back into
    a ``datetime.timedelta``."""
    # Turn the '.' before the microseconds into a ':' so the string splits
    # into exactly four integer fields.
    fields = [int(field) for field in s.replace('.', ':').split(':')]
    hours, minutes, seconds, microseconds = fields
    return datetime_module.timedelta(hours=hours, minutes=minutes,
                                     seconds=seconds,
                                     microseconds=microseconds)
6,085 | import setuptools
import re
def read_file(filename):
    """Return the entire text content of *filename*."""
    with open(filename) as handle:
        return handle.read()
6,086 | import subprocess
import sys
def iterate_authors_by_chronological_order(branch):
    # Yield commit author names on `branch`, oldest commit first, with
    # denied names removed and repeated names collapsed.
    # NOTE(review): `deny_list` and `drop_recurrences` are defined
    # elsewhere in this script — confirm their semantics before reuse.
    log_call = subprocess.run(
        (
            'git', 'log', branch, '--encoding=utf-8', '--full-history',
            '--reverse', '--format=format:%at;%an;%ae'
        ),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    # One line per commit: "<author-timestamp>;<author-name>;<author-email>".
    log_lines = log_call.stdout.decode('utf-8').split('\n')
    authors = tuple(line.strip().split(";")[1] for line in log_lines)
    authors = (author for author in authors if author not in deny_list)
    return drop_recurrences(authors)
def print_authors(branch):
    # Write each author name to stdout as raw UTF-8 bytes; writing to
    # `sys.stdout.buffer` avoids console-encoding errors on non-ASCII names.
    for author in iterate_authors_by_chronological_order(branch):
        sys.stdout.buffer.write(author.encode())
        sys.stdout.buffer.write(b'\n')
6,087 | import json
import random
import time
import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset
def to_float(x):
    """Return the first element of tensor *x* as a numpy float scalar."""
    flat = x.cpu().detach().numpy().flatten()
    return flat[0].astype(float)
6,088 | import json
import random
import time
import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset
def set_seed(seed):
    """Seed every RNG in use (stdlib, numpy, torch CPU and all CUDA
    devices) so that runs are reproducible."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
6,089 | from torch.utils.cpp_extension import load
import math
import numpy as np
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
def RWKV_Init(module, config):  # fancy initialization of all lin & emb layer in the module
    """Apply RWKV-specific initialization to every nn.Linear and
    nn.Embedding inside *module*.

    `config` must provide `vocab_size` and `n_embd`, which are used to
    recognize the token-embedding and final-projection matrices.
    """
    for m in module.modules():
        if not isinstance(m, (nn.Linear, nn.Embedding)):
            continue
        with torch.no_grad():
            name = '[unknown weight]'
            for name, parameter in module.named_parameters():  # find the name of the weight
                if id(m.weight) == id(parameter):
                    break

            shape = m.weight.data.shape
            gain = 1.0
            scale = 1.0  # extra scale for gain

            if isinstance(m, nn.Embedding):
                gain = math.sqrt(max(shape[0], shape[1]))
                if shape[0] == config.vocab_size and shape[1] == config.n_embd: # token emb?
                    # Token embedding gets a tiny normal init (std 1e-4).
                    scale = 1e-4
                else:
                    # Other embeddings are zero-initialized.
                    scale = 0

            if isinstance(m, nn.Linear):
                if m.bias is not None:
                    m.bias.data.zero_()
                if shape[0] > shape[1]:
                    gain = math.sqrt(shape[0] / shape[1])
                if shape[0] == config.vocab_size and shape[1] == config.n_embd: # final projection?
                    scale = 0.5

            # A layer may override its own scale via a `scale_init` attribute.
            if hasattr(m, 'scale_init'):
                scale = m.scale_init

            # print(str(shape[0]).ljust(5), str(shape[1]).ljust(5), f'{round(scale,2):g}'.ljust(4), name)

            gain *= scale
            if scale == -999:
                # Sentinel value: identity initialization.
                nn.init.eye_(m.weight)
            elif gain == 0:
                # zero init is great for some RWKV matrices
                nn.init.zeros_(m.weight)
            elif gain > 0:
                nn.init.orthogonal_(m.weight, gain=gain)
            else:
                # Negative gain encodes "normal init with std = -scale".
                nn.init.normal_(m.weight, mean=0.0, std=-scale)
6,090 | print('Loading...')
from src.model_run import RWKV_RNN
import numpy as np
import os, copy, types, gc, sys
import torch
from src.utils import TOKENIZER
tokenizer = TOKENIZER(WORD_NAME, UNKNOWN_CHAR=UNKNOWN_CHAR)
args = types.SimpleNamespace()
args.RUN_DEVICE = "cuda"
args.FLOAT_MODE = "fp16"
args.vocab_size = 50277
args.head_qk = 0
args.pre_ffn = 0
args.grad_cp = 0
args.my_pos_emb = 0
args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-14b/RWKV-4-Pile-14B-20230108-5170'
args.n_layer = 40
args.n_embd = 5120
args.ctx_len = 1024
print(f'loading... {MODEL_NAME}')
model_tokens = []
current_state = None
def run_rnn(tokens, newline_adj = 0):
    # Feed `tokens` through the RNN one at a time, appending to the
    # module-level `model_tokens` history and updating `current_state`.
    # Returns the logits produced after the final token.
    # NOTE(review): `model` is created elsewhere in this script.
    global model_tokens, current_state
    for i in range(len(tokens)):
        model_tokens += [int(tokens[i])]
        if i == len(tokens) - 1:
            # Only the last step needs logits; earlier steps just advance state.
            out, current_state = model.forward(model_tokens, current_state)
        else:
            current_state = model.forward(model_tokens, current_state, preprocess_only = True)

    # print(f'### model ###\n[{tokenizer.tokenizer.decode(model_tokens)}]')

    out[0] = -999999999  # disable <|endoftext|>
    # Bias the newline logit (token 187 — presumably '\n' in this
    # tokenizer; confirm) to encourage or discourage line breaks.
    out[187] += newline_adj
    # if newline_adj > 0:
    #     out[15] += newline_adj / 2 # '.'
    return out
def save_all_stat(srv, name, last_out):
    # Snapshot the current generation state (last logits, RNN state, token
    # history) under key f'{name}_{srv}' in the module-level `all_state`
    # dict (defined elsewhere in this script). Deep copies are taken so a
    # later generation cannot mutate the snapshot.
    n = f'{name}_{srv}'
    all_state[n] = {}
    all_state[n]['out'] = last_out
    all_state[n]['rnn'] = copy.deepcopy(current_state)
    all_state[n]['token'] = copy.deepcopy(model_tokens)
def load_all_stat(srv, name):
    # Restore a snapshot made by `save_all_stat`: overwrites the global
    # RNN state and token history, and returns the saved logits.
    # Raises KeyError if the snapshot does not exist.
    global model_tokens, current_state
    n = f'{name}_{srv}'
    current_state = copy.deepcopy(all_state[n]['rnn'])
    model_tokens = copy.deepcopy(all_state[n]['token'])
    return all_state[n]['out']
print(f'\nRun prompt...')
out = run_rnn(tokenizer.tokenizer.encode(init_prompt))
save_all_stat('', 'chat_init', out)
print(f'### prompt ###\n[{tokenizer.tokenizer.decode(model_tokens)}]\n')
def reply_msg(msg):
print(f'{bot}{interface} {msg}\n')
print(HELP_MSG)
while True:
msg = input(f'{user}{interface} ')
if len(msg.strip()) > 0:
on_message(msg)
else:
print('Erorr: please say something')
def on_message(message):
global model_tokens, current_state
srv = 'dummy_server'
msg = message.replace('\\n','\n').strip()
if len(msg) > 1000:
reply_msg('your message is too long (max 1000 tokens)')
return
x_temp = 1.0
x_top_p = 0.85
if ("-temp=" in msg):
x_temp = float(msg.split("-temp=")[1].split(" ")[0])
msg = msg.replace("-temp="+f'{x_temp:g}', "")
# print(f"temp: {x_temp}")
if ("-top_p=" in msg):
x_top_p = float(msg.split("-top_p=")[1].split(" ")[0])
msg = msg.replace("-top_p="+f'{x_top_p:g}', "")
# print(f"top_p: {x_top_p}")
if x_temp <= 0.2:
x_temp = 0.2
if x_temp >= 5:
x_temp = 5
if x_top_p <= 0:
x_top_p = 0
if msg == '+reset':
out = load_all_stat('', 'chat_init')
save_all_stat(srv, 'chat', out)
reply_msg("Chat reset.")
return
elif msg[:5].lower() == '+gen ' or msg[:4].lower() == '+qa ' or msg.lower() == '+more' or msg.lower() == '+retry':
if msg[:5].lower() == '+gen ':
new = '\n' + msg[5:].strip()
# print(f'### prompt ###\n[{new}]')
current_state = None
out = run_rnn(tokenizer.tokenizer.encode(new))
save_all_stat(srv, 'gen_0', out)
elif msg[:4].lower() == '+qa ':
out = load_all_stat('', 'chat_init')
real_msg = msg[4:].strip()
new = f"{user}{interface} {real_msg}\n\n{bot}{interface}"
# print(f'### qa ###\n[{new}]')
out = run_rnn(tokenizer.tokenizer.encode(new))
save_all_stat(srv, 'gen_0', out)
# new = f"\nThe following is an excellent Q&A session consists of detailed and factual information.\n\nQ: What is 3+5?\nA: The answer is 8.\n\nQ: {msg[9:].strip()}\nA:"
# print(f'### prompt ###\n[{new}]')
# current_state = None
# out = run_rnn(tokenizer.tokenizer.encode(new))
# save_all_stat(srv, 'gen_0', out)
elif msg.lower() == '+more':
try:
out = load_all_stat(srv, 'gen_1')
save_all_stat(srv, 'gen_0', out)
except:
return
elif msg.lower() == '+retry':
try:
out = load_all_stat(srv, 'gen_0')
except:
return
begin = len(model_tokens)
out_last = begin
for i in range(150):
token = tokenizer.sample_logits(
out,
model_tokens,
args.ctx_len,
temperature=x_temp,
top_p_usual=x_top_p,
top_p_newline=x_top_p,
)
if msg[:4].lower() == '+qa ':
out = run_rnn([token], newline_adj=-1)
else:
out = run_rnn([token])
xxx = tokenizer.tokenizer.decode(model_tokens[out_last:])
if '\ufffd' not in xxx:
print(xxx, end='', flush=True)
out_last = begin + i + 1
print('\n')
# send_msg = tokenizer.tokenizer.decode(model_tokens[begin:]).strip()
# print(f'### send ###\n[{send_msg}]')
# reply_msg(send_msg)
save_all_stat(srv, 'gen_1', out)
else:
if msg.lower() == '+alt':
try:
out = load_all_stat(srv, 'chat_pre')
except:
return
else:
out = load_all_stat(srv, 'chat')
new = f"{user}{interface} {msg}\n\n{bot}{interface}"
# print(f'### add ###\n[{new}]')
out = run_rnn(tokenizer.tokenizer.encode(new), newline_adj=-999999999)
save_all_stat(srv, 'chat_pre', out)
begin = len(model_tokens)
out_last = begin
print(f'{bot}{interface}', end='', flush=True)
for i in range(999):
if i <= 0:
newline_adj = -999999999
elif i <= 30:
newline_adj = (i - 30) / 10
elif i <= 130:
newline_adj = 0
else:
newline_adj = (i - 130) * 0.25 # MUST END THE GENERATION
token = tokenizer.sample_logits(
out,
model_tokens,
args.ctx_len,
temperature=x_temp,
top_p_usual=x_top_p,
top_p_newline=x_top_p,
)
out = run_rnn([token], newline_adj=newline_adj)
xxx = tokenizer.tokenizer.decode(model_tokens[out_last:])
if '\ufffd' not in xxx:
print(xxx, end='', flush=True)
out_last = begin + i + 1
send_msg = tokenizer.tokenizer.decode(model_tokens[begin:])
if '\n\n' in send_msg:
send_msg = send_msg.strip()
break
# send_msg = tokenizer.tokenizer.decode(model_tokens[begin:]).strip()
# if send_msg.endswith(f'{user}{interface}'): # warning: needs to fix state too !!!
# send_msg = send_msg[:-len(f'{user}{interface}')].strip()
# break
# if send_msg.endswith(f'{bot}{interface}'):
# send_msg = send_msg[:-len(f'{bot}{interface}')].strip()
# break
# print(f'{model_tokens}')
# print(f'[{tokenizer.tokenizer.decode(model_tokens)}]')
# print(f'### send ###\n[{send_msg}]')
# reply_msg(send_msg)
save_all_stat(srv, 'chat', out) | null |
6,091 | import json, time, random, os
import numpy as np
import torch
from torch.nn import functional as F
# Minimum elapsed time (seconds since module load) observed per label.
time_slot = {}
time_ref = time.time_ns()

def record_time(name):
    """Record, for *name*, the earliest elapsed time at which it was seen.

    Repeated calls keep the minimum, so `time_slot[name]` ends up holding
    the fastest observation.
    """
    elapsed = (time.time_ns() - time_ref) / 1e9
    best = time_slot.setdefault(name, 1e20)
    if elapsed < best:
        time_slot[name] = elapsed
6,092 | import json, time, random, os
import numpy as np
import torch
from torch.nn import functional as F
def FermatPrimalityTest(number):
    """Probabilistic Fermat primality test with 3 random rounds.

    Returns True when `number` survives every round (probably prime),
    False when a Fermat witness is found or `number` <= 1. Always True
    for actual primes; rare composites (e.g. Carmichael numbers) can
    slip through.

    Fixes over the original: the loop variable no longer shadows the
    module-level `time` import, and the random base is drawn from
    [2, number - 1] — the old `randint(2, number) - 1` could draw base 1,
    which passes vacuously and wasted a round.
    """
    if number <= 1:
        return False
    if number <= 3:
        # 2 and 3 are prime; also randint(2, number - 1) needs number >= 4.
        return True
    for _ in range(3):
        base = random.randint(2, number - 1)
        # Fermat: for prime p and any base a, a**(p-1) % p == 1.
        if pow(base, number - 1, number) != 1:
            return False
    return True
def MillerRabinPrimalityTest(number):
    """Probabilistic Miller-Rabin primality test with 3 random rounds.

    Deterministically correct for primes, 1, and even numbers; an odd
    composite passes (false positive) with probability <= (1/4)**3.

    Fixes over the original: the round-loop variable no longer shadows
    the module-level `time` import, and the rejection-sampling loop for
    the base is replaced by an equivalent direct uniform draw from
    [2, number - 1].
    """
    if number == 2:
        return True
    elif number == 1 or number % 2 == 0:
        return False

    # Write number - 1 as odd_part * 2**two_exponent.
    odd_part = number - 1
    two_exponent = 0
    while odd_part % 2 == 0:
        odd_part = odd_part // 2
        two_exponent = two_exponent + 1

    for _ in range(3):
        base = random.randint(2, number - 1)
        x = pow(base, odd_part, number)
        if (x != 1) and (x != number - 1):
            # Repeatedly square; a prime must hit number - 1 before the
            # exponent is exhausted, otherwise `base` is a witness.
            iteration = 1
            while (iteration <= two_exponent - 1) and (x != number - 1):
                x = pow(x, 2, number)
                iteration = iteration + 1
            if x != (number - 1):
                return False
    return True
def MaybeIsPrime(number):
    """Return True when *number* passes both the Fermat and Miller-Rabin
    probabilistic tests, i.e. is almost certainly prime."""
    # `and` already yields the boolean we want; the original
    # `if ...: return True else: return False` was redundant.
    return FermatPrimalityTest(number) and MillerRabinPrimalityTest(number)
6,093 | import os, math, time, datetime, subprocess
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
def my_save(args, trainer, dd, ff):
    # Save checkpoint `dd` (a state dict) to path `ff`. For the special
    # 14b / world runs, stage the file in /dev/shm and hand it off to an
    # asynchronous `aws s3 mv` upload; otherwise save locally (letting
    # the trainer handle sharded deepspeed-stage-3 checkpoints).
    if '14b-run1' in ff:
        fn = ff.split('/')[-1]
        fff = '/dev/shm/' + fn
        torch.save(dd, fff)
        # Fire-and-forget upload; the Popen handle is deliberately dropped.
        subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True)
    elif ('world/14b' in ff) or ('world/7b' in ff):
        aa = ff.split('/')[1]
        fn = ff.split('/')[-1]
        fff = f'/dev/shm/{aa}-{fn}'
        torch.save(dd, fff)
        subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True)
    else:
        if 'deepspeed_stage_3' in args.strategy:
            # Stage-3 weights are sharded across ranks; only the trainer
            # knows how to consolidate them.
            trainer.save_checkpoint(ff, weights_only=True)
        else:
            torch.save(dd, ff)
6,094 | import os, math, time, datetime, subprocess
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
def generate_init_weight(model, init_weight_name):
    """Generate the model's initial state dict and save it to
    *init_weight_name*.

    In pile stage 1, weights from `args.load_model` are merged in first;
    tensors whose shapes differ are resampled row-wise by linear
    interpolation. Exits the process when stage-1 preparation is done.
    """
    mm = model.generate_init_weight()

    if model.args.my_pile_stage == 1:
        if len(model.args.load_model) > 0:
            print(f"Combine weights from {model.args.load_model}...")
            load_dict = torch.load(model.args.load_model, map_location="cpu")
            for k in load_dict:
                try:
                    assert k in mm
                except:
                    print('missing', k)
                    exit(0)
                src = load_dict[k]
                try:
                    # Same element count: a plain reshape suffices.
                    mm[k] = src.reshape(mm[k].shape)
                except:
                    # Incompatible shapes: stretch/shrink `src` along its
                    # first axis by linear interpolation to fit.
                    tmp = mm[k].squeeze().clone()
                    print(k, src.shape, '-->', mm[k].shape)
                    ss = src.shape[0]
                    dd = tmp.shape[0]
                    for i in range(dd):
                        pos = i / dd * ss
                        if pos >= ss - 1:
                            tmp[i] = src[ss-1]
                        else:
                            p0 = int(math.floor(pos))
                            ii = pos - p0
                            tmp[i] = src[p0] * (1-ii) + src[p0+1] * (ii)
                    mm[k] = tmp.reshape(mm[k].shape)
                    # Debug print: first/last values before and after.
                    sss = src.squeeze().float().cpu().numpy()
                    print(sss[:10], '...', sss[-10:])
                    mmm = mm[k].squeeze().float().cpu().numpy()
                    print(mmm[:10], '...', mmm[-10:])

    print(f"Save to {init_weight_name}...")
    torch.save(mm, init_weight_name)

    if model.args.my_pile_stage == 1:
        print("Done. Now go for stage 2.")
        exit(0)
6,095 | from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
def print_rank_0(*message):
    """No-op placeholder for rank-aware printing.

    The distributed-aware version (print only on rank 0) is kept below,
    commented out, for when it is re-enabled.
    """
    pass
    # if torch.distributed.is_initialized():
    #     if torch.distributed.get_rank() == 0:
    #         print(*message, flush=True)
    # else:
    #     print(*message, flush=True)
6,096 | from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
def _warmup_mmap_file(path):
    # Intentionally a no-op: the page-cache warm-up below (sequentially
    # reading the file in 100 MB chunks) is disabled — presumably the
    # startup cost was not worth it; confirm before re-enabling.
    pass
    # with open(path, "rb") as stream:
    #     while stream.read(100 * 1024 * 1024):
    #         pass
6,097 | from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
# Serialization codes for the dtypes supported by the indexed dataset
# format; `code()` below performs the reverse lookup.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: float,
    7: np.double,
    8: np.uint16,
}


def code(dtype):
    """Return the integer serialization code for *dtype*.

    Raises ValueError when the dtype has no registered code.
    """
    # Iterate items directly instead of the original keys-then-index loop.
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
6,098 | from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
def index_file_path(prefix_path):
    """Path of the index (.idx) file for dataset *prefix_path*."""
    return f"{prefix_path}.idx"
6,099 | from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
def data_file_path(prefix_path):
    """Path of the binary data (.bin) file for dataset *prefix_path*."""
    return f"{prefix_path}.bin"
6,100 | import types
import torch
import math, os, gc
from torch.nn import functional as F
import torch.nn as nn
from typing import List, Dict
def __nop(ob):
    # Identity function: returns its argument unchanged. Presumably used
    # as a no-op stand-in for a JIT/script decorator when compilation is
    # disabled — confirm at call sites.
    return ob
6,101 | import os, math, gc, importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.cpp_extension import load
def __nop(ob):
    # Identity function: returns its argument unchanged. Presumably used
    # as a no-op stand-in for a JIT/script decorator when compilation is
    # disabled — confirm at call sites.
    return ob
6,102 | import os, math, gc, importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.cpp_extension import load
if os.environ["RWKV_FLOAT_MODE"] == "bf16":
wkv_cuda = load(name=f"wkv_{T_MAX}_bf16", sources=["cuda/wkv_op_bf16.cpp", "cuda/wkv_cuda_bf16.cu"], verbose=True, extra_cuda_cflags=["-t 4", "-std=c++17", "-res-usage", "--maxrregcount 60", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-DTmax={T_MAX}"])
class WKV(torch.autograd.Function):
def forward(ctx, B, T, C, w, u, k, v):
def backward(ctx, gy):
else:
wkv_cuda = load(name=f"wkv_{T_MAX}", sources=["cuda/wkv_op.cpp", "cuda/wkv_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--maxrregcount 60", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-DTmax={T_MAX}"])
class WKV(torch.autograd.Function):
def forward(ctx, B, T, C, w, u, k, v):
def backward(ctx, gy):
def RUN_CUDA(B, T, C, w, u, k, v):
return WKV.apply(B, T, C, w, u, k, v) | null |
6,103 | import os, math, gc, importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.cpp_extension import load
if 'r4' in os.environ["RWKV_MY_TESTING"]:
HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"])
wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"],
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"])
class WKV_5(torch.autograd.Function):
def forward(ctx, B, T, C, H, r, k, v, w, u):
with torch.no_grad():
assert r.dtype == torch.bfloat16
assert k.dtype == torch.bfloat16
assert v.dtype == torch.bfloat16
assert w.dtype == torch.bfloat16
assert u.dtype == torch.bfloat16
assert HEAD_SIZE == C // H
ctx.B = B
ctx.T = T
ctx.C = C
ctx.H = H
assert r.is_contiguous()
assert k.is_contiguous()
assert v.is_contiguous()
assert w.is_contiguous()
assert u.is_contiguous()
ew = (-torch.exp(w.float())).contiguous()
eew = (torch.exp(ew)).contiguous()
ctx.save_for_backward(r, k, v, eew, ew, u)
y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1)
wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y)
return y
def backward(ctx, gy):
with torch.no_grad():
assert gy.dtype == torch.bfloat16
B = ctx.B
T = ctx.T
C = ctx.C
H = ctx.H
assert gy.is_contiguous()
r, k, v, eew, ew, u = ctx.saved_tensors
gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1)
gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1)
gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1)
gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1)
gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1)
wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu)
gw = torch.sum(gw, 0).view(H, C//H)
gu = torch.sum(gu, 0).view(H, C//H)
return (None, None, None, None, gr, gk, gv, gw, gu)
def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u):
return WKV_5.apply(B, T, C, H, r, k, v, w, u) | null |
6,104 | import numpy as np
import os, math, gc
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as vision
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
import deepspeed
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
import clip
from transformers import CLIPModel
def __nop(ob):
    # Identity function: returns its argument unchanged. Presumably used
    # as a no-op stand-in for a JIT/script decorator when compilation is
    # disabled — confirm at call sites.
    return ob
6,105 | import numpy as np
import os, math, gc
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as vision
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
import deepspeed
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
import clip
from transformers import CLIPModel
def cosine_loss(x, y):
    """Per-row cosine distance: 1 - cos_sim(x_i, y_i) for each row i of
    the 2-D inputs."""
    x_unit = F.normalize(x, dim=-1)
    y_unit = F.normalize(y, dim=-1)
    similarity = torch.einsum('ij,ij->i', [x_unit, y_unit])
    return 1 - similarity
6,106 | import numpy as np
import math, os, sys, types, time, gc
import torch
from src.utils import TOKENIZER
from src.model_run import RWKV_RNN
# Per-label minimum elapsed time (seconds) since module import.
time_slot = {}
time_ref = time.time_ns()

def record_time(name):
    """Keep the smallest elapsed wall-clock time observed for *name*."""
    now_s = (time.time_ns() - time_ref) / 1e9
    previous = time_slot.get(name, 1e20)
    time_slot[name] = min(previous, now_s)
6,107 | import os
import json
import random
import numpy as np
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
def to_float(x):
    """Extract the first element of tensor *x* as a numpy float scalar."""
    first = x.cpu().detach().numpy().flatten()[0]
    return first.astype(float)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.