id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
3,266 | import argparse
import logging
import os
import re
import io
import textwrap
from os import path
def find_python_files(directory):
    """Yield the path of every ``*.py`` file found under *directory*, recursively."""
    for dirpath, _subdirs, filenames in os.walk(directory):
        for name in filenames:
            if name.endswith('.py'):
                yield path.join(dirpath, name)
3,267 | import argparse
import logging
import contextlib
import datetime
import os
import re
import subprocess
import unittest
from os import path
The provided code snippet includes necessary dependencies for implementing the `find_files` function. Write a Python function `def find_files(rootdir, regexp_files, ignore_dirs)` to solve the following problem:
Find the files we need to apply this to.
Here is the function:
def find_files(rootdir, regexp_files, ignore_dirs):
    """Find the files we need to apply this to."""
    for base, subdirs, filenames in os.walk(rootdir):
        # Never descend into a 'build' directory.
        with contextlib.suppress(ValueError):
            subdirs.remove('build')
        # Prune ignored directories in place so os.walk honours the pruning.
        subdirs[:] = [d for d in subdirs if not re.match(ignore_dirs, d)]
        for name in filenames:
            if re.match(regexp_files, name):
                yield path.join(base, name)
3,268 | import argparse
import logging
import contextlib
import datetime
import os
import re
import subprocess
import unittest
from os import path
LICENSE = '__license__ = "GNU GPLv2"'
def find_existing_copyright(lines):
    """Find the line numbers for an existing copyright.

    Returns:
      A tuple of three entries (copyright, license, author): each is the index
      of the first line matching the corresponding pattern, or None when the
      pattern does not appear at all.
    """
    patterns = (
        '__copyright__ = .* Martin Blais',
        '__license__ = ',
        '__author__ = ',
    )
    indexes = []
    for pattern in patterns:
        found = None
        for lineno, text in enumerate(lines):
            if re.match(pattern, text):
                found = lineno
                break
        indexes.append(found)
    return tuple(indexes)
def find_start(lines):
    """Return how many leading lines of *lines* are comments or docstrings."""
    contents = ''.join(line + os.linesep for line in lines)
    offset = 0
    # Keep consuming a leading '#' comment line or a docstring block.
    while True:
        hit = re.match(r'(^#[^\n]*|""".*?""".*?)\n', contents[offset:], re.DOTALL)
        if not hit:
            break
        offset += hit.end()
    return len(contents[:offset].splitlines())
def get_copyright(filename, prev_line, cwd):
    """Get the copyright string."""
    previous = parse_years_from_copyright(prev_line)
    changed = get_change_years(filename, cwd)
    all_years = sorted(set(previous) | set(changed))
    return COPYRIGHT.format(years=format_years(compress_years(all_years)))
The provided code snippet includes necessary dependencies for implementing the `process` function. Write a Python function `def process(filename, contents)` to solve the following problem:
Process the copyright on a single file, return the modified contents.
Here is the function:
def process(filename, contents):
    """Process the copyright on a single file, return the modified contents."""
    logging.info('Processing {:60}'.format(filename))
    # pylint: disable=unbalanced-tuple-unpacking
    lines = contents.splitlines()
    copyright_index, license_index, author_index = find_existing_copyright(lines)
    # Update copyright and license lines.
    # NOTE(review): get_copyright() is evaluated eagerly while building this
    # list, so if copyright_index is None, lines[None] raises TypeError before
    # the None-check below is reached -- confirm inputs always have a
    # copyright line, or defer the lookup.
    for index, updated_line in [
        (copyright_index, get_copyright(filename, lines[copyright_index],
                                        cwd=path.dirname(filename))),
        (license_index, LICENSE),
    ]:
        if index is None:
            # Pattern was absent: insert after the leading comment/docstring.
            logging.error("Line not found in file: {}".format(updated_line))
            start_index = find_start(lines)
            # NOTE(review): this insert shifts later lines, so the previously
            # computed license/author indexes may become stale -- confirm.
            lines[start_index:start_index] = [updated_line]
        else:
            existing_line = lines[index]
            if existing_line != updated_line:
                logging.warning('Replacing line:\n{}\n{}'.format(existing_line,
                                                                 updated_line))
                lines[index] = updated_line
    # Remove author line, if present.
    if author_index is not None:
        logging.info("Removing author line at {}:{}".format(filename, author_index))
        del lines[author_index]
    return ''.join(line + os.linesep for line in lines)
3,269 | import inspect
import os
import json
import threading
import traceback
import requests
import time
import asyncio
import aiohttp
from PyQt5.QtCore import pyqtSignal, QObject
from ..common.config import cfg, Language
from ..common.logger import logger
from ..common.signals import signalBus
from ..common.util import getPortTokenServerByPid
from .exceptions import *
TAG = "Connector"
connector = LolClientConnector()
logger = Logger("Seraphine")
signalBus = SignalBus()
class SummonerNotFound(BaseException):
    # Raised when the LCU reports no summoner for the queried id/puuid.
    # NOTE(review): subclasses BaseException rather than Exception; the
    # retry() wrapper catches BaseException and special-cases this type,
    # so changing the base would alter behaviour -- keep as-is.
    pass
class RetryMaximumAttempts(BaseException):
    # Raised by retry() when all attempts are exhausted without any other
    # exception having been captured.
    pass
def retry(count=5, retry_sep=0):
    """Decorator factory: retry an async LCU call up to *count* times.

    Args:
        count: maximum number of attempts.
        retry_sep: seconds to wait between attempts.
    """
    def decorator(func):
        async def wrapper(*args, **kwargs):
            logger.info(f"call %s" % func.__name__, TAG)
            # Inspect the wrapped function's parameter names for logging.
            func_params = inspect.signature(func).parameters
            param_names = list(func_params.keys())
            tmp_args = args
            if param_names[0] == "self":
                # args[0] is the self (connector) instance; strip it so the
                # name/value pairing below also works for plain methods.
                param_names = param_names[1:]
                tmp_args = args[1:]
            # Pair each parameter name with its actual argument value.
            params_dict = {param: arg for param,
                           arg in zip(param_names, tmp_args)}
            logger.debug(f"args = {params_dict}|kwargs = {kwargs}", TAG)
            # logger.debug(f"args = {args[1:]}|kwargs = {kwargs}", TAG)
            exce = None
            for _ in range(count):
                try:
                    async with connector.semaphore:
                        res = await func(*args, **kwargs)
                except BaseException as e:
                    # NOTE(review): time.sleep blocks the event loop;
                    # presumably retry_sep is 0 in practice -- confirm.
                    time.sleep(retry_sep)
                    exce = e
                    # Retrying SummonerNotFound just triggers 429 (rate limit).
                    if isinstance(e, SummonerNotFound):
                        raise e
                    continue
                else:
                    break
            else:
                # All attempts failed: re-raise the captured exception, or
                # RetryMaximumAttempts when nothing was captured.
                exce = exce if exce else RetryMaximumAttempts(
                    "Exceeded maximum retry attempts.")
                # ReferenceError means a request was sent before the LCU was
                # ready; swallow it silently. Anything else pops a notice.
                if type(exce) is not ReferenceError:
                    signalBus.lcuApiExceptionRaised.emit(
                        func.__name__, exce)
                logger.exception(f"exit {func.__name__}", exce, TAG)
                raise exce
            logger.info(f"exit {func.__name__}", TAG)
            logger.debug(f"result = {res}", TAG)
            return res
        return wrapper
    return decorator
3,270 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
def getTeammates(game, targetPuuid):
    """Extract the target summoner's teammates and opponents from *game*."""
    cherry = game['queueId'] == 1700

    def groupKey(p):
        # Arena (queueId 1700) groups players by sub-team placement
        # instead of by teamId.
        return p['stats']['subteamPlacement'] if cherry else p['teamId']

    targetParticipantId = None
    for identity in game['participantIdentities']:
        if identity['player']['puuid'] == targetPuuid:
            targetParticipantId = identity['participantId']
            break
    assert targetParticipantId is not None

    target = next(p for p in game['participants']
                  if p['participantId'] == targetParticipantId)
    tid = groupKey(target)

    res = {
        'queueId': game['queueId'],
        'win': target['stats']['win'],
        'remake': target['stats']['teamEarlySurrendered'],
        'summoners': [],  # teammates (field name kept for compatibility)
        'enemies': [],    # everyone on the other team(s)
    }
    for p in game['participants']:
        ident = game['participantIdentities'][p['participantId'] - 1]['player']
        entry = {'summonerId': ident['summonerId'], 'name': ident['summonerName'],
                 'puuid': ident['puuid'], 'icon': ident['profileIcon']}
        if groupKey(p) == tid:
            if ident['puuid'] != targetPuuid:
                res['summoners'].append(entry)
            else:
                # Champion the target played; custom games lack the field.
                res["championId"] = p.get('championId', -1)
        else:
            res['enemies'].append(entry)
    return res
connector = LolClientConnector()
async def getRecentTeammates(games, puuid):
    """Aggregate win/loss stats for summoners recently played with, top 5."""
    stats = {}
    for g in games:
        detail = await connector.getGameDetailByGameId(g['gameId'])
        mates = getTeammates(detail, puuid)
        for mate in mates['summoners']:
            if mate['summonerId'] == 0:
                continue
            record = stats.get(mate['puuid'])
            if record is None:
                summonerIcon = await connector.getProfileIcon(mate['icon'])
                record = {"name": mate['name'], 'icon': summonerIcon,
                          "total": 0, "wins": 0, "losses": 0,
                          "puuid": mate["puuid"]}
                stats[mate['puuid']] = record
            record['total'] += 1
            # Remakes count toward 'total' but not toward wins/losses.
            if not mates['remake']:
                if mates['win']:
                    record['wins'] += 1
                else:
                    record['losses'] += 1
    top = sorted(stats.values(), key=lambda s: s['total'], reverse=True)[:5]
    return {"puuid": puuid, "summoners": top}
3,271 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
async def parseGameData(game):
    """Parse one raw match record into the summary dict used by the UI."""
    timeStamp = game["gameCreation"]  # millisecond timestamp
    # NOTE(review): local 'time' shadows the module-level 'time' import
    # within this function.
    time = timeStampToStr(game['gameCreation'])
    shortTime = timeStampToShortStr(game['gameCreation'])
    gameId = game['gameId']
    duration = secsToStr(game['gameDuration'])
    queueId = game['queueId']
    nameAndMap = connector.manager.getNameMapByQueueId(queueId)
    modeName = nameAndMap['name']
    # queueId 0 is a custom game: resolve the map name from mapId instead.
    if queueId != 0:
        mapName = nameAndMap['map']
    else:
        mapName = connector.manager.getMapNameById(game['mapId'])
    # presumably participants[0] is the summoner the history was queried
    # for -- TODO confirm against the LCU endpoint.
    participant = game['participants'][0]
    championId = participant['championId']
    championIcon = await connector.getChampionIcon(championId)
    spell1Id = participant['spell1Id']
    spell2Id = participant['spell2Id']
    spell1Icon = await connector.getSummonerSpellIcon(spell1Id)
    spell2Icon = await connector.getSummonerSpellIcon(spell2Id)
    stats = participant['stats']
    champLevel = stats['champLevel']
    kills = stats['kills']
    deaths = stats['deaths']
    assists = stats['assists']
    itemIds = [
        stats['item0'],
        stats['item1'],
        stats['item2'],
        stats['item3'],
        stats['item4'],
        stats['item5'],
        stats['item6'],
    ]
    itemIcons = [await connector.getItemIcon(itemId) for itemId in itemIds]
    runeId = stats['perk0']
    runeIcon = await connector.getRuneIcon(runeId)
    cs = stats['totalMinionsKilled'] + stats['neutralMinionsKilled']
    gold = stats['goldEarned']
    remake = stats['gameEndedInEarlySurrender']
    win = stats['win']
    timeline = participant['timeline']
    lane = timeline['lane']
    role = timeline['role']
    # Position label is only shown for ranked solo/duo (420) and flex (440).
    position = None
    pt = ToolsTranslator()
    if queueId in [420, 440]:
        if lane == 'TOP':
            position = pt.top
        elif lane == "JUNGLE":
            position = pt.jungle
        elif lane == 'MIDDLE':
            position = pt.middle
        elif role == 'SUPPORT':
            position = pt.support
        elif lane == 'BOTTOM' and role == 'CARRY':
            position = pt.bottom
    return {
        'queueId': queueId,
        'gameId': gameId,
        'time': time,
        'shortTime': shortTime,
        'name': modeName,
        'map': mapName,
        'duration': duration,
        'remake': remake,
        'win': win,
        'championId': championId,
        'championIcon': championIcon,
        'spell1Icon': spell1Icon,
        'spell2Icon': spell2Icon,
        'champLevel': champLevel,
        'kills': kills,
        'deaths': deaths,
        'assists': assists,
        'itemIcons': itemIcons,
        'runeIcon': runeIcon,
        'cs': cs,
        'gold': gold,
        'timeStamp': timeStamp,
        'position': position,
    }
def getRecentChampions(games):
    """Tally per-champion win/loss totals, returning at most the top 10."""
    tally = {}
    for g in games:
        # Skip custom games.
        if g['queueId'] == 0:
            continue
        cid = g['championId']
        entry = tally.setdefault(
            cid, {'icon': g['championIcon'], 'wins': 0, 'losses': 0, 'total': 0})
        entry['total'] += 1
        # Remakes count toward 'total' but not toward wins/losses.
        if not g['remake']:
            if g['win']:
                entry['wins'] += 1
            else:
                entry['losses'] += 1
    return sorted(tally.values(), key=lambda e: e['total'], reverse=True)[:10]
cfg = Config()
connector = LolClientConnector()
async def parseSummonerData(summoner):
    """Assemble profile, rank and recent-game statistics for one summoner."""
    iconId = summoner['profileIconId']
    icon = await connector.getProfileIcon(iconId)
    level = summoner['summonerLevel']
    xpSinceLastLevel = summoner['xpSinceLastLevel']
    xpUntilNextLevel = summoner['xpUntilNextLevel']
    rankInfo = await connector.getRankedStatsByPuuid(summoner['puuid'])
    try:
        gamesInfo = await connector.getSummonerGamesByPuuid(
            summoner['puuid'], 0, cfg.get(cfg.careerGamesNumber) - 1)
    # NOTE(review): bare except keeps this best-effort when the history
    # fetch fails, but it also hides programming errors -- confirm intent.
    except:
        champions = []
        games = {}
    else:
        games = {
            "gameCount": gamesInfo["gameCount"],
            "wins": 0,
            "losses": 0,
            "kills": 0,
            "deaths": 0,
            "assists": 0,
            "games": [],
        }
        for game in gamesInfo["games"]:
            info = await parseGameData(game)
            # Skip games older than roughly one year (timeStamp is in ms).
            if time.time() - info["timeStamp"] / 1000 > 60 * 60 * 24 * 365:
                continue
            # Remakes and custom games (queueId 0) don't count toward K/D/A.
            if not info["remake"] and info["queueId"] != 0:
                games["kills"] += info["kills"]
                games["deaths"] += info["deaths"]
                games["assists"] += info["assists"]
                if info["win"]:
                    games["wins"] += 1
                else:
                    games["losses"] += 1
            games["games"].append(info)
        champions = getRecentChampions(games['games'])
    return {
        'name': summoner.get("gameName") or summoner['displayName'],
        'icon': icon,
        'level': level,
        'xpSinceLastLevel': xpSinceLastLevel,
        'xpUntilNextLevel': xpUntilNextLevel,
        'puuid': summoner['puuid'],
        'rankInfo': rankInfo,
        'games': games,
        'champions': champions,
        'isPublic': summoner['privacy'] == "PUBLIC",
        'tagLine': summoner.get("tagLine"),
    }
3,272 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
def translateTier(orig: str, short=False) -> str:
def timeStampToStr(stamp):
def secsToStr(secs):
cfg = Config()
connector = LolClientConnector()
async def parseGameDetailData(puuid, game):
    """Parse a full match-detail payload into per-team stats and rosters.

    Args:
        puuid: puuid of the summoner the detail view is centred on.
        game: raw LCU match-detail payload.
    """
    queueId = game['queueId']
    mapId = game['mapId']
    names = connector.manager.getNameMapByQueueId(queueId)
    modeName = names['name']
    # queueId 0 is a custom game: resolve the map name from mapId instead.
    if queueId != 0:
        mapName = names['map']
    else:
        mapName = connector.manager.getMapNameById(mapId)

    def origTeam(teamId):
        # Fresh per-team accumulator; teamId only selects the icon assets.
        return {
            'win': None,
            'bans': [],
            'baronKills': 0,
            'baronIcon': f"app/resource/images/baron-{teamId}.png",
            'dragonKills': 0,
            'dragonIcon': f'app/resource/images/dragon-{teamId}.png',
            'riftHeraldKills': 0,
            'riftHeraldIcon': f'app/resource/images/herald-{teamId}.png',
            'inhibitorKills': 0,
            'inhibitorIcon': f'app/resource/images/inhibitor-{teamId}.png',
            'towerKills': 0,
            'towerIcon': f'app/resource/images/tower-{teamId}.png',
            'kills': 0,
            'deaths': 0,
            'assists': 0,
            'gold': 0,
            'summoners': []
        }

    # NOTE(review): keys 300/400 presumably serve Arena sub-teams
    # (subteamPlacement * 100, see below) -- TODO confirm.
    teams = {
        100: origTeam("100"),
        200: origTeam("200"),
        300: origTeam("100"),
        400: origTeam("200")
    }
    cherryResult = None
    for team in game['teams']:
        teamId = team['teamId']
        if teamId == 0:
            teamId = 200
        teams[teamId]['win'] = team['win']
        teams[teamId]['bans'] = [
            await connector.getChampionIcon(item['championId'])
            for item in team['bans']
        ]
        teams[teamId]['baronKills'] = team['baronKills']
        teams[teamId]['dragonKills'] = team['dragonKills']
        teams[teamId]['riftHeraldKills'] = team['riftHeraldKills']
        teams[teamId]['towerKills'] = team['towerKills']
        teams[teamId]['inhibitorKills'] = team['inhibitorKills']
    for participant in game['participantIdentities']:
        participantId = participant['participantId']
        summonerName = participant['player'].get(
            'gameName') or participant['player'].get('summonerName')  # compat with non-CN servers
        summonerPuuid = participant['player']['puuid']
        isCurrent = (summonerPuuid == puuid)
        if summonerPuuid == '00000000-0000-0000-0000-000000000000':  # AI / bot player
            isPublic = True
        else:
            t = await connector.getSummonerByPuuid(summonerPuuid)
            isPublic = t["privacy"] == "PUBLIC"
        for summoner in game['participants']:
            if summoner['participantId'] == participantId:
                stats = summoner['stats']
                # Arena (1700) groups by subteamPlacement * 100; other
                # queues group by teamId.
                if queueId != 1700:
                    subteamPlacement = None
                    tid = summoner['teamId']
                else:
                    subteamPlacement = stats['subteamPlacement']
                    tid = subteamPlacement * 100
                if isCurrent:
                    remake = stats['gameEndedInEarlySurrender']
                    win = stats['win']
                    if queueId == 1700:
                        cherryResult = subteamPlacement
                championId = summoner['championId']
                championIcon = await connector.getChampionIcon(championId)
                spell1Id = summoner['spell1Id']
                spell1Icon = await connector.getSummonerSpellIcon(spell1Id)
                spell2Id = summoner['spell2Id']
                spell2Icon = await connector.getSummonerSpellIcon(spell2Id)
                kills = stats['kills']
                deaths = stats['deaths']
                assists = stats['assists']
                gold = stats['goldEarned']
                teams[tid]['kills'] += kills
                teams[tid]['deaths'] += deaths
                teams[tid]['assists'] += assists
                teams[tid]['gold'] += gold
                runeIcon = await connector.getRuneIcon(stats['perk0'])
                itemIds = [
                    stats['item0'],
                    stats['item1'],
                    stats['item2'],
                    stats['item3'],
                    stats['item4'],
                    stats['item5'],
                    stats['item6'],
                ]
                itemIcons = [
                    await connector.getItemIcon(itemId) for itemId in itemIds
                ]
                getRankInfo = cfg.get(cfg.showTierInGameInfo)
                tier, division, lp, rankIcon = None, None, None, None
                if getRankInfo:
                    rank = await connector.getRankedStatsByPuuid(
                        summonerPuuid)
                    rank = rank.get('queueMap')
                    # KeyError below covers missing queue entries.
                    try:
                        if queueId != 1700 and rank:
                            rankInfo = rank[
                                'RANKED_FLEX_SR'] if queueId == 440 else rank['RANKED_SOLO_5x5']
                            tier = rankInfo['tier']
                            division = rankInfo['division']
                            lp = rankInfo['leaguePoints']
                            if tier == '':
                                rankIcon = 'app/resource/images/unranked.png'
                            else:
                                rankIcon = f'app/resource/images/{tier.lower()}.png'
                                tier = translateTier(tier, True)
                            if division == 'NA':
                                division = ''
                        else:
                            rankInfo = rank["CHERRY"]
                            lp = rankInfo['ratedRating']
                    except KeyError:
                        ...
                item = {
                    'summonerName': summonerName,
                    'puuid': summonerPuuid,
                    'isCurrent': isCurrent,
                    'championIcon': championIcon,
                    'rankInfo': getRankInfo,
                    'tier': tier,
                    'division': division,
                    'lp': lp,
                    'rankIcon': rankIcon,
                    'spell1Icon': spell1Icon,
                    'spell2Icon': spell2Icon,
                    'itemIcons': itemIcons,
                    'kills': kills,
                    'deaths': deaths,
                    'assists': assists,
                    'cs': stats['totalMinionsKilled'] + stats['neutralMinionsKilled'],
                    'gold': gold,
                    'runeIcon': runeIcon,
                    'champLevel': stats['champLevel'],
                    # 'demage' (sic) key kept -- the UI reads this spelling.
                    'demage': stats['totalDamageDealtToChampions'],
                    'subteamPlacement': subteamPlacement,
                    'isPublic': isPublic
                }
                teams[tid]['summoners'].append(item)
                break
    # NOTE(review): 'win'/'remake' are only bound when the target puuid was
    # found among the participants -- confirm callers guarantee that.
    mapIcon = connector.manager.getMapIconByMapId(mapId, win)
    return {
        'gameId': game['gameId'],
        'mapIcon': mapIcon,
        'gameCreation': timeStampToStr(game['gameCreation']),
        'gameDuration': secsToStr(game['gameDuration']),
        'modeName': modeName,
        'mapName': mapName,
        'queueId': queueId,
        'win': win,
        'cherryResult': cherryResult,
        'remake': remake,
        'teams': teams,
    }
3,273 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
class ToolsTranslator(QObject):
    """Qt-translatable string table for lane labels and ranked queue names."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        # Lane / position short labels.
        self.top = self.tr("TOP")
        self.jungle = self.tr("JUG")
        self.middle = self.tr("MID")
        self.bottom = self.tr("BOT")
        self.support = self.tr("SUP")
        # Ranked queue display names.
        self.rankedSolo = self.tr('Ranked Solo')
        self.rankedFlex = self.tr("Ranked Flex")
def translateTier(orig: str, short=False) -> str:
    """Translate a ranked tier name into the configured display language.

    Returns '--' for an empty tier; otherwise the capitalised English name,
    or its Chinese equivalent (full or short form) for non-English locales.
    """
    if orig == '':
        return "--"
    tier = orig.capitalize()
    if cfg.language.value == Language.ENGLISH:
        return tier
    # Chinese names, indexed as [full form, short form].
    zh = {
        'Iron': ['坚韧黑铁', '黑铁'],
        'Bronze': ['英勇黄铜', '黄铜'],
        'Silver': ['不屈白银', '白银'],
        'Gold': ['荣耀黄金', '黄金'],
        'Platinum': ['华贵铂金', '铂金'],
        'Emerald': ['流光翡翠', '翡翠'],
        'Diamond': ['璀璨钻石', '钻石'],
        'Master': ['超凡大师', '大师'],
        'Grandmaster': ['傲世宗师', '宗师'],
        'Challenger': ['最强王者', '王者'],
    }
    return zh[tier][1 if short else 0]
def parseDetailRankInfo(rankInfo):
    """Build the two career-table rows (solo queue, flex queue).

    Args:
        rankInfo: LCU ranked-stats payload with a 'queueMap' entry.

    Returns:
        A list of two rows; each row is a list of display strings:
        [queue label, total, win rate, wins, losses, current tier,
        LP, highest tier, previous-season tier].
    """
    t = ToolsTranslator()
    queueMap = rankInfo['queueMap']
    # The solo and flex rows were previously two near-identical 30-line
    # stanzas (with a 'solx' typo); both now share one helper.
    return [
        _parseQueueRankInfo(queueMap['RANKED_SOLO_5x5'], t.rankedSolo),
        _parseQueueRankInfo(queueMap['RANKED_FLEX_SR'], t.rankedFlex),
    ]


def _parseQueueRankInfo(info, label):
    """Format one queue's ranked stats into a row of display strings."""

    def tierDivision(tierKey, divisionKey):
        # Blank the division when unranked ('--') or not applicable ('NA').
        tier = translateTier(info[tierKey])
        division = info[divisionKey]
        if tier == '--' or division == 'NA':
            division = ''
        return f'{tier} {division}'

    wins = info['wins']
    losses = info['losses']
    total = wins + losses
    winRate = wins * 100 // total if total != 0 else 0
    return [
        label,
        str(total),
        str(winRate) + ' %' if total != 0 else '--',
        str(wins),
        str(losses),
        tierDivision('tier', 'division'),
        str(info['leaguePoints']),
        tierDivision('highestTier', 'highestDivision'),
        tierDivision('previousSeasonEndTier', 'previousSeasonEndDivision'),
    ]
3,274 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
async def parseSummonerGameInfo(item, isRank, currentSummonerId):
async def parseAllyGameInfo(session, currentSummonerId):
    """Parse champ-select info for every ally, ordered by cell (seat) id."""
    myTeam = session['myTeam']
    # Ranked games have pre-assigned positions.
    isRank = bool(myTeam[0]["assignedPosition"])
    results = await asyncio.gather(
        *(parseSummonerGameInfo(member, isRank, currentSummonerId)
          for member in myTeam))
    # Drop empty seats, then sort by seat order.
    summoners = sorted((s for s in results if s), key=lambda s: s["cellId"])
    return {
        'summoners': summoners,
        'champions': {s['summonerId']: s['championId'] for s in summoners},
        'order': [s['summonerId'] for s in summoners],
    }
3,275 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
def parseSummonerOrder(team):
    """Return summoner ids ordered by cellId, dropping empty (id == 0) seats."""
    bySeat = sorted(team, key=lambda member: member['cellId'])
    return [member['summonerId'] for member in bySeat
            if member['summonerId'] != 0]
3,276 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
def sortedSummonersByGameRole(summoners: list):
    """Order summoners TOP→JUNGLE→MIDDLE→BOTTOM→UTILITY.

    Returns None when any summoner's selected position is not one of the
    five standard roles.
    """
    roleRank = {"TOP": 0, "JUNGLE": 1, "MIDDLE": 2, "BOTTOM": 3, "UTILITY": 4}
    if not all(s['selectedPosition'] in roleRank for s in summoners):
        return None
    return sorted(summoners, key=lambda s: roleRank[s['selectedPosition']])
def separateTeams(data, currentSummonerId):
    """Split teamOne/teamTwo into (ally, enemy) relative to the current summoner.

    When the current summoner appears on neither team, teamTwo is treated
    as the ally side (matching the original fall-through).
    """
    one, two = data['teamOne'], data['teamTwo']
    onTeamOne = any(s.get('summonerId') == currentSummonerId for s in one)
    return (one, two) if onTeamOne else (two, one)
async def parseSummonerGameInfo(item, isRank, currentSummonerId):
    """Collect profile, rank and recent-match info for one champ-select member.

    Returns None for empty seats (summonerId 0 or None).
    """
    summonerId = item.get('summonerId')
    if summonerId == 0 or summonerId == None:
        return None
    summoner = await connector.getSummonerById(summonerId)
    championId = item.get('championId') or 0
    icon = await connector.getChampionIcon(championId)
    puuid = summoner["puuid"]
    origRankInfo = await connector.getRankedStatsByPuuid(puuid)
    rankInfo = parseRankInfo(origRankInfo)
    try:
        origGamesInfo = await connector.getSummonerGamesByPuuid(
            puuid, 0, 14)
        # Optionally keep only ranked (420/440) games, paging further back
        # (up to index 95) until at least 11 remain.
        if cfg.get(cfg.gameInfoFilter) and isRank:
            origGamesInfo["games"] = [
                game for game in origGamesInfo["games"] if game["queueId"] in (420, 440)]
            begIdx = 15
            while len(origGamesInfo["games"]) < 11 and begIdx <= 95:
                endIdx = begIdx + 5
                new = (await connector.getSummonerGamesByPuuid(puuid, begIdx, endIdx))["games"]
                for game in new:
                    if game["queueId"] in (420, 440):
                        origGamesInfo['games'].append(game)
                begIdx = endIdx + 1
    # NOTE(review): bare except keeps champ-select parsing best-effort, at
    # the cost of hiding programming errors.
    except:
        gamesInfo = []
    else:
        tasks = [parseGameData(game)
                 for game in origGamesInfo["games"][:11]]
        gamesInfo = await asyncio.gather(*tasks)
    _, kill, deaths, assists, _, _ = parseGames(gamesInfo)
    teammatesInfo = [
        getTeammates(
            await connector.getGameDetailByGameId(game["gameId"]),
            puuid
        ) for game in gamesInfo[:1]  # only the latest game; slice also guards empty history
    ]
    recentlyChampionName = ""
    fateFlag = None
    if teammatesInfo:  # guard: inactive players / fresh accounts have no games
        if currentSummonerId in [t['summonerId'] for t in teammatesInfo[0]['summoners']]:
            # Was a teammate in the previous game.
            fateFlag = "ally"
        elif currentSummonerId in [t['summonerId'] for t in teammatesInfo[0]['enemies']]:
            # Was an opponent in the previous game.
            fateFlag = "enemy"
        recentlyChampionId = max(
            teammatesInfo and teammatesInfo[0]['championId'], 0)  # -1 when absent; clamp to 0
        recentlyChampionName = connector.manager.champs.get(
            recentlyChampionId)
    return {
        "name": summoner["gameName"] or summoner["displayName"],
        'tagLine': summoner.get("tagLine"),
        "icon": icon,
        'championId': championId,
        "level": summoner["summonerLevel"],
        "rankInfo": rankInfo,
        "gamesInfo": gamesInfo,
        "xpSinceLastLevel": summoner["xpSinceLastLevel"],
        "xpUntilNextLevel": summoner["xpUntilNextLevel"],
        "puuid": puuid,
        "summonerId": summonerId,
        "kda": [kill, deaths, assists],
        "cellId": item.get("cellId"),
        "selectedPosition": item.get("selectedPosition"),
        "fateFlag": fateFlag,
        # Champion used in the most recent game (shown when met before).
        "recentlyChampionName": recentlyChampionName
    }
async def parseGameInfoByGameflowSession(session, currentSummonerId, side):
    """Parse one side's summoner info from a gameflow session.

    Args:
        session: LCU gameflow session payload.
        currentSummonerId: the logged-in summoner's id.
        side: 'enemy' to parse the opposing team, anything else for allies.

    Returns:
        dict with 'summoners', 'champions' (summonerId -> championId) and
        pick 'order', or None for unsupported queues (Arena / TFT).
    """
    data = session['gameData']
    queueId = data['queue']['id']
    if queueId in (1700, 1090, 1100, 1110, 1130, 1160):  # Arena / TFT queues
        return None
    isRank = queueId in (420, 440)
    if side == 'enemy':
        _, team = separateTeams(data, currentSummonerId)
    else:
        team, _ = separateTeams(data, currentSummonerId)
    tasks = [parseSummonerGameInfo(item, isRank, currentSummonerId)
             for item in team]
    summoners = await asyncio.gather(*tasks)
    summoners = [summoner for summoner in summoners if summoner]
    if isRank:
        # In ranked, prefer role order (TOP..UTILITY) when fully assigned.
        byRole = sortedSummonersByGameRole(summoners)
        if byRole is not None:  # fixed: was `!= None` (non-idiomatic)
            summoners = byRole
    champions = {summoner['summonerId']: summoner['championId']
                 for summoner in summoners}
    order = [summoner['summonerId'] for summoner in summoners]
    return {'summoners': summoners, 'champions': champions, 'order': order}
3,277 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
def sortedSummonersByGameRole(summoners: list):
    """Sort summoners by lane order (TOP first, UTILITY last).

    Returns None if any entry's selected position falls outside the five
    standard roles.
    """
    laneOrder = {"TOP": 0, "JUNGLE": 1, "MIDDLE": 2, "BOTTOM": 3, "UTILITY": 4}
    if not all(member['selectedPosition'] in laneOrder for member in summoners):
        return None
    return sorted(summoners,
                  key=lambda member: laneOrder[member['selectedPosition']])
def separateTeams(data, currentSummonerId):
    """Return (ally, enemy) team lists for the current summoner.

    Falls back to treating teamTwo as the ally side when the summoner is
    not found on teamOne (matching the original for/else behaviour).
    """
    teamOne = data['teamOne']
    teamTwo = data['teamTwo']
    if any(member.get('summonerId') == currentSummonerId for member in teamOne):
        return teamOne, teamTwo
    return teamTwo, teamOne
def getAllyOrderByGameRole(session, currentSummonerId):
    """Return ally summoner ids sorted by game role, or None.

    Only ranked queues (420 solo/duo, 440 flex) carry role assignments; any
    other queue -- or an ally without a valid role -- yields None.
    """
    data = session['gameData']
    queueId = data['queue']['id']
    if queueId not in (420, 440):
        return None
    ally, _ = separateTeams(data, currentSummonerId)
    ally = sortedSummonersByGameRole(ally)
    if ally is None:  # fixed: was `== None` (non-idiomatic)
        return None
    return [x['summonerId'] for x in ally]
3,278 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
def separateTeams(data, currentSummonerId):
    """Split the session teams into (ally, enemy) for *currentSummonerId*."""
    first = data['teamOne']
    second = data['teamTwo']
    # Default to teamTwo as ally, flip when the summoner sits on teamOne
    # (same outcome as the original for/else scan).
    ally, enemy = second, first
    for member in first:
        if member.get('summonerId') == currentSummonerId:
            ally, enemy = first, second
            break
    return ally, enemy
The provided code snippet includes necessary dependencies for implementing the `getTeamColor` function. Write a Python function `def getTeamColor(session, currentSummonerId)` to solve the following problem:
输入 session 以及当前召唤师 id,输出 summonerId -> 颜色的映射
Here is the function:
def getTeamColor(session, currentSummonerId):
    '''
    Given the session and the current summoner id, map each summonerId to a
    premade-group color index: solo players get -1, every multi-member
    party shares one incrementing color. Returns (allyColors, enemyColors).
    '''
    ally, enemy = separateTeams(session['gameData'], currentSummonerId)

    def colorize(team):
        # Group summoner ids by teamParticipantId (the premade party id).
        parties = {}
        for member in team:
            sid = member.get('summonerId')
            if not sid:
                continue
            pid = member.get('teamParticipantId')
            if not pid:
                continue
            parties.setdefault(pid, []).append(sid)
        # Assign -1 to singletons, a shared color index to each party.
        colors = {}
        nextColor = 0
        for members in parties.values():
            if len(members) == 1:
                colors[members[0]] = -1
            else:
                for sid in members:
                    colors[sid] = nextColor
                nextColor += 1
        return colors

    return colorize(ally), colorize(enemy)
3,279 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
async def parseGameData(game):
    """Parse one raw match record into the summary dict used by the UI."""
    timeStamp = game["gameCreation"]  # millisecond timestamp
    # NOTE(review): local 'time' shadows the module-level 'time' import
    # within this function.
    time = timeStampToStr(game['gameCreation'])
    shortTime = timeStampToShortStr(game['gameCreation'])
    gameId = game['gameId']
    duration = secsToStr(game['gameDuration'])
    queueId = game['queueId']
    nameAndMap = connector.manager.getNameMapByQueueId(queueId)
    modeName = nameAndMap['name']
    # queueId 0 is a custom game: resolve the map name from mapId instead.
    if queueId != 0:
        mapName = nameAndMap['map']
    else:
        mapName = connector.manager.getMapNameById(game['mapId'])
    # presumably participants[0] is the summoner the history was queried
    # for -- TODO confirm against the LCU endpoint.
    participant = game['participants'][0]
    championId = participant['championId']
    championIcon = await connector.getChampionIcon(championId)
    spell1Id = participant['spell1Id']
    spell2Id = participant['spell2Id']
    spell1Icon = await connector.getSummonerSpellIcon(spell1Id)
    spell2Icon = await connector.getSummonerSpellIcon(spell2Id)
    stats = participant['stats']
    champLevel = stats['champLevel']
    kills = stats['kills']
    deaths = stats['deaths']
    assists = stats['assists']
    itemIds = [
        stats['item0'],
        stats['item1'],
        stats['item2'],
        stats['item3'],
        stats['item4'],
        stats['item5'],
        stats['item6'],
    ]
    itemIcons = [await connector.getItemIcon(itemId) for itemId in itemIds]
    runeId = stats['perk0']
    runeIcon = await connector.getRuneIcon(runeId)
    cs = stats['totalMinionsKilled'] + stats['neutralMinionsKilled']
    gold = stats['goldEarned']
    remake = stats['gameEndedInEarlySurrender']
    win = stats['win']
    timeline = participant['timeline']
    lane = timeline['lane']
    role = timeline['role']
    # Position label is only shown for ranked solo/duo (420) and flex (440).
    position = None
    pt = ToolsTranslator()
    if queueId in [420, 440]:
        if lane == 'TOP':
            position = pt.top
        elif lane == "JUNGLE":
            position = pt.jungle
        elif lane == 'MIDDLE':
            position = pt.middle
        elif role == 'SUPPORT':
            position = pt.support
        elif lane == 'BOTTOM' and role == 'CARRY':
            position = pt.bottom
    return {
        'queueId': queueId,
        'gameId': gameId,
        'time': time,
        'shortTime': shortTime,
        'name': modeName,
        'map': mapName,
        'duration': duration,
        'remake': remake,
        'win': win,
        'championId': championId,
        'championIcon': championIcon,
        'spell1Icon': spell1Icon,
        'spell2Icon': spell2Icon,
        'champLevel': champLevel,
        'kills': kills,
        'deaths': deaths,
        'assists': assists,
        'itemIcons': itemIcons,
        'runeIcon': runeIcon,
        'cs': cs,
        'gold': gold,
        'timeStamp': timeStamp,
        'position': position,
    }
async def parseGamesDataConcurrently(games):
tasks = [parseGameData(game) for game in games]
return await asyncio.gather(*tasks) | null |
3,280 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
cfg = Config()
connector = LolClientConnector()
async def autoPickOrBan(data):
isAutoBan = cfg.get(cfg.enableAutoBanChampion)
isAutoSelect = cfg.get(cfg.enableAutoSelectChampion)
isAutoCompleted = cfg.get(cfg.enableAutoSelectTimeoutCompleted)
localPlayerCellId = data["data"]["localPlayerCellId"]
team = data['data']["myTeam"]
actions = data['data']['actions']
timer = data['data']['timer']
if timer["phase"] != "BAN_PICK":
return
for actionGroup in actions:
for action in actionGroup:
if (action["actorCellId"] == localPlayerCellId
and not action["completed"] and action["isInProgress"]):
actionId = action["id"]
if isAutoSelect and action["type"] == "pick":
isPicked = False
for player in team:
if player["cellId"] == localPlayerCellId:
isPicked = bool(player["championId"]) or bool(
player["championPickIntent"])
break
if not isPicked:
championId = connector.manager.getChampionIdByName(
cfg.get(cfg.autoSelectChampion))
await connector.selectChampion(
actionId, championId)
# 超时自动锁定
if isAutoCompleted:
totalTime = timer["totalTimeInPhase"]
timeLeft = timer["adjustedTimeLeftInPhase"]
if totalTime - timeLeft > 1000: # 满足情况时, 可能是别人的timer
return
await asyncio.sleep(int(timeLeft / 1000) - 1)
sess = await connector.getChampSelectSession()
for player in sess["myTeam"]:
if player["cellId"] == localPlayerCellId: # 找到自己
if player["championPickIntent"] == championId: # 如果仍然和自动亮起的英雄一样(上厕所去了), 锁一下
await connector.selectChampion(actionId, championId, True)
break
elif isAutoBan and action["type"] == "ban":
championId = connector.manager.getChampionIdByName(
cfg.get(cfg.autoBanChampion))
# 给队友一点预选的时间
await asyncio.sleep(cfg.get(cfg.autoBanDelay))
isFriendly = cfg.get(cfg.pretentBan)
if isFriendly:
for player in team:
if championId == player["championPickIntent"]:
championId = 0
break
await connector.banChampion(actionId, championId, True)
break | null |
3,281 | import time
import win32gui
import win32con
import win32api
import ctypes
import qasync
import asyncio
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from ..common.config import cfg, Language
from ..lol.connector import LolClientConnector, connector
connector = LolClientConnector()
The provided code snippet includes necessary dependencies for implementing the `fixLeagueClientWindow` function. Write a Python function `async def fixLeagueClientWindow()` to solve the following problem:
#### 需要管理员权限 调用 Win API 手动调整窗口大小 / 位置 详情请见 https://github.com/LeagueTavern/fix-lcu-window @return: 当且仅当需要修复且权限不足时返回 `False`
Here is the function:
async def fixLeagueClientWindow():
"""
#### 需要管理员权限
调用 Win API 手动调整窗口大小 / 位置
详情请见 https://github.com/LeagueTavern/fix-lcu-window
@return: 当且仅当需要修复且权限不足时返回 `False`
"""
windowHWnd = win32gui.FindWindow("RCLIENT", "League of Legends")
# 客户端只有在 DX 9 模式下这个玩意才不是 0
windowCefHWnd = win32gui.FindWindowEx(
windowHWnd, 0, "CefBrowserWindow", None)
if not windowHWnd or not windowCefHWnd:
return True
# struct WINDOWPLACEMENT {
# UINT length; (事实上并没有该字段)
# UINT flags;
# UINT showCmd;
# POINT ptMinPosition;
# POINT ptMaxPosition;
# RECT rcNormalPosition;
# } ;
placement = win32gui.GetWindowPlacement(windowHWnd)
if placement[1] == win32con.SW_SHOWMINIMIZED:
return True
# struct RECT {
# LONG left;
# LONG top;
# LONG right;
# LONG bottom;
# }
windowRect = win32gui.GetWindowRect(windowHWnd)
windowCefRect = win32gui.GetWindowRect(windowCefHWnd)
def needResize(rect):
return (rect[3] - rect[1]) / (rect[2] - rect[0]) != 0.5625
if not needResize(windowRect) and not needResize(windowCefRect):
return True
clientZoom = int(await connector.getClientZoom())
screenWidth = win32api.GetSystemMetrics(0)
screenHeight = win32api.GetSystemMetrics(1)
targetWindowWidth = 1280 * clientZoom
targetWindowHeight = 720 * clientZoom
def patchDpiChangedMessage(hWnd):
dpi = ctypes.windll.user32.GetDpiForWindow(hWnd)
wParam = win32api.MAKELONG(dpi, dpi)
lParam = ctypes.pointer((ctypes.c_int * 4)(0, 0, 0, 0))
WM_DPICHANGED = 0x02E0
win32api.SendMessage(hWnd, WM_DPICHANGED, wParam, lParam)
try:
patchDpiChangedMessage(windowHWnd)
patchDpiChangedMessage(windowCefHWnd)
SWP_SHOWWINDOW = 0x0040
win32gui.SetWindowPos(
windowHWnd,
0,
(screenWidth - targetWindowWidth) // 2,
(screenHeight - targetWindowHeight) // 2,
targetWindowWidth, targetWindowHeight,
SWP_SHOWWINDOW
)
win32gui.SetWindowPos(
windowCefHWnd,
0,
0,
0,
targetWindowWidth,
targetWindowHeight,
SWP_SHOWWINDOW
)
except:
# 需要管理员权限
return False
return True | #### 需要管理员权限 调用 Win API 手动调整窗口大小 / 位置 详情请见 https://github.com/LeagueTavern/fix-lcu-window @return: 当且仅当需要修复且权限不足时返回 `False` |
3,282 | from enum import Enum
from typing import Tuple
import traceback
from PyQt5.QtGui import QColor, QClipboard
from PyQt5.QtCore import QObject
from app.common.qfluentwidgets import StyleSheetBase, Theme, qconfig, isDarkTheme
from app.common.config import cfg
from app.common.signals import signalBus
def __getStyleSheetColor(color: QColor):
'''
返回主颜色、鼠标悬停颜色、鼠标按下颜色以及边框颜色
'''
r, g, b, a = color.getRgb()
f1, f2 = 1.1, 0.9
r1, g1, b1 = min(r * f1, 255), min(g * f1, 255), min(b * f1, 255)
r2, g2, b2 = min(r * f2, 255), min(g * f2, 255), min(b * f2, 255)
a1, a2 = min(a + 25, 255), min(a + 50, 255)
c1 = QColor.fromRgb(r1, g1, b1, a1)
c2 = QColor.fromRgb(r2, g2, b2, a2)
c3 = QColor.fromRgb(r, g, b, min(a+130, 255))
return color, c1, c2, c3
cfg = Config()
def __getWinColor():
color = cfg.get(cfg.winCardColor)
return __getStyleSheetColor(color) | null |
3,283 | from enum import Enum
from typing import Tuple
import traceback
from PyQt5.QtGui import QColor, QClipboard
from PyQt5.QtCore import QObject
from app.common.qfluentwidgets import StyleSheetBase, Theme, qconfig, isDarkTheme
from app.common.config import cfg
from app.common.signals import signalBus
def __getStyleSheetColor(color: QColor):
'''
返回主颜色、鼠标悬停颜色、鼠标按下颜色以及边框颜色
'''
r, g, b, a = color.getRgb()
f1, f2 = 1.1, 0.9
r1, g1, b1 = min(r * f1, 255), min(g * f1, 255), min(b * f1, 255)
r2, g2, b2 = min(r * f2, 255), min(g * f2, 255), min(b * f2, 255)
a1, a2 = min(a + 25, 255), min(a + 50, 255)
c1 = QColor.fromRgb(r1, g1, b1, a1)
c2 = QColor.fromRgb(r2, g2, b2, a2)
c3 = QColor.fromRgb(r, g, b, min(a+130, 255))
return color, c1, c2, c3
cfg = Config()
def __getLoseColor():
color = cfg.get(cfg.loseCardColor)
return __getStyleSheetColor(color) | null |
3,284 | from enum import Enum
from typing import Tuple
import traceback
from PyQt5.QtGui import QColor, QClipboard
from PyQt5.QtCore import QObject
from app.common.qfluentwidgets import StyleSheetBase, Theme, qconfig, isDarkTheme
from app.common.config import cfg
from app.common.signals import signalBus
def __getStyleSheetColor(color: QColor):
cfg = Config()
def __getRemakeColor():
color = cfg.get(cfg.remakeCardColor)
return __getStyleSheetColor(color) | null |
3,285 | from enum import Enum
from typing import Tuple
import traceback
from PyQt5.QtGui import QColor, QClipboard
from PyQt5.QtCore import QObject
from app.common.qfluentwidgets import StyleSheetBase, Theme, qconfig, isDarkTheme
from app.common.config import cfg
from app.common.signals import signalBus
def __getDefaultColor():
color = QColor(233, 233, 233, 13 if isDarkTheme() else 170)
c1 = QColor(243, 243, 243, 21 if isDarkTheme() else 127)
c2 = QColor(255, 255, 255, 8 if isDarkTheme() else 64)
c3 = QColor(255, 255, 255, 20) if isDarkTheme(
) else QColor(0, 0, 0, 25)
return color, c1, c2, c3 | null |
3,286 | from enum import Enum
from typing import Tuple
import traceback
from PyQt5.QtGui import QColor, QClipboard
from PyQt5.QtCore import QObject
from app.common.qfluentwidgets import StyleSheetBase, Theme, qconfig, isDarkTheme
from app.common.config import cfg
from app.common.signals import signalBus
def __getStyleSheetColor(color: QColor):
'''
返回主颜色、鼠标悬停颜色、鼠标按下颜色以及边框颜色
'''
r, g, b, a = color.getRgb()
f1, f2 = 1.1, 0.9
r1, g1, b1 = min(r * f1, 255), min(g * f1, 255), min(b * f1, 255)
r2, g2, b2 = min(r * f2, 255), min(g * f2, 255), min(b * f2, 255)
a1, a2 = min(a + 25, 255), min(a + 50, 255)
c1 = QColor.fromRgb(r1, g1, b1, a1)
c2 = QColor.fromRgb(r2, g2, b2, a2)
c3 = QColor.fromRgb(r, g, b, min(a+130, 255))
return color, c1, c2, c3
def __getTeam1Color():
# TODO: 开放用户自定义设置
color = QColor.fromRgb(255, 176, 27, 39)
return __getStyleSheetColor(color) | null |
3,287 | from enum import Enum
from typing import Tuple
import traceback
from PyQt5.QtGui import QColor, QClipboard
from PyQt5.QtCore import QObject
from app.common.qfluentwidgets import StyleSheetBase, Theme, qconfig, isDarkTheme
from app.common.config import cfg
from app.common.signals import signalBus
def __getStyleSheetColor(color: QColor):
'''
返回主颜色、鼠标悬停颜色、鼠标按下颜色以及边框颜色
'''
r, g, b, a = color.getRgb()
f1, f2 = 1.1, 0.9
r1, g1, b1 = min(r * f1, 255), min(g * f1, 255), min(b * f1, 255)
r2, g2, b2 = min(r * f2, 255), min(g * f2, 255), min(b * f2, 255)
a1, a2 = min(a + 25, 255), min(a + 50, 255)
c1 = QColor.fromRgb(r1, g1, b1, a1)
c2 = QColor.fromRgb(r2, g2, b2, a2)
c3 = QColor.fromRgb(r, g, b, min(a+130, 255))
return color, c1, c2, c3
def __getTeam2Color():
# TODO: 开放用户自定义设置
color = QColor.fromRgb(255, 51, 153, 39)
return __getStyleSheetColor(color) | null |
3,288 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
def getTasklistPath():
for path in ['tasklist',
'C:/Windows/System32/tasklist.exe']:
try:
cmd = f'{path} /FI "imagename eq LeagueClientUx.exe" /NH'
_ = subprocess.check_output(cmd, shell=True)
return path
except:
pass
return None | null |
3,289 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
def getLolClientPidSlowly():
for process in psutil.process_iter():
if process.name() in ['LeagueClientUx.exe', 'LeagueClientUx']:
return process.pid
return -1 | null |
3,290 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
def getLolClientPid(path):
processes = subprocess.check_output(
f'{path} /FI "imagename eq LeagueClientUx.exe" /NH', shell=True)
if b'LeagueClientUx.exe' in processes:
arr = processes.split()
try:
pos = arr.index(b"LeagueClientUx.exe")
return int(arr[pos+1])
except ValueError:
raise ValueError(f"Subprocess return exception: {processes}")
else:
return 0 | null |
3,291 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
def getLolClientPids(path):
processes = subprocess.check_output(
f'{path} /FI "imagename eq LeagueClientUx.exe" /NH', shell=True)
if not b'LeagueClientUx.exe' in processes:
return 0
pids = []
arr = processes.split()
for i, s in enumerate(arr):
if s == b'LeagueClientUx.exe':
pids.append(int(arr[i + 1]))
return pids | null |
3,292 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
def getLolClientPidsSlowly():
pids = []
for process in psutil.process_iter():
if process.name() in ['LeagueClientUx.exe', 'LeagueClientUx']:
pids.append(process.pid)
return pids | null |
3,293 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
def isLolGameProcessExist(path):
processes = subprocess.check_output(
f'{path} /FI "imagename eq League of Legends.exe" /NH', shell=True)
return b'League of Legends.exe' in processes | null |
3,294 | import requests
import base64
import subprocess
import psutil
from app.common.config import cfg, VERSION
The provided code snippet includes necessary dependencies for implementing the `getPortTokenServerByPid` function. Write a Python function `def getPortTokenServerByPid(pid)` to solve the following problem:
通过进程 id 获得启动命令行参数中的 port、token 以及登录服务器
Here is the function:
def getPortTokenServerByPid(pid):
'''
通过进程 id 获得启动命令行参数中的 port、token 以及登录服务器
'''
port, token, server = None, None, None
process = psutil.Process(pid)
cmdline = process.cmdline()
for cmd in cmdline:
p = cmd.find("--app-port=")
if p != -1:
port = cmd[11:]
p = cmd.find("--remoting-auth-token=")
if p != -1:
token = cmd[22:]
p = cmd.find("--rso_platform_id=")
if p != -1:
server = cmd[18:]
if port and token and server:
break
return port, token, server | 通过进程 id 获得启动命令行参数中的 port、token 以及登录服务器 |
3,295 | import subprocess
bat = '''@echo off
:start
tasklist | find /i "Seraphine.exe" > nul
if NOT errorlevel 1 (
echo Seraphine is running, waiting...
timeout /t 1 > nul
goto start
)
for /d %%i in (*) do (
rmdir "%%~fi" /s /q
)
for %%i in (*) do (
if NOT "%%i" equ "updater.bat" (
del "%%i" /s /q
)
)
set src=%AppData%\\Seraphine\\temp
for /D %%a in (%src%\\*) do (
move %%a .
)
for %%a in (%src%\\*) do (
move %%a .
)
rmdir %src% /s /q
start /b .\Seraphine.exe
del %0
'''
def runUpdater():
with open("updater.bat", 'w', encoding='utf-8') as f:
f.write(bat)
subprocess.Popen("updater.bat") | null |
3,296 | from enum import Enum
import os
import sys
from PyQt5.QtCore import QLocale
from .qfluentwidgets import (qconfig, QConfig, ConfigItem, FolderValidator, BoolValidator,
OptionsConfigItem, OptionsValidator, ConfigSerializer,
RangeConfigItem, RangeValidator, EnumSerializer, ColorConfigItem)
def isWin11():
return sys.platform == 'win32' and sys.getwindowsversion().build >= 22000 | null |
3,297 | import os.path
from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
import numpy as np
import json
import torch
from scipy.io import loadmat, savemat
import pickle
from util.preprocess import align_img, estimate_norm
from util.load_mats import load_lm3d
The provided code snippet includes necessary dependencies for implementing the `default_flist_reader` function. Write a Python function `def default_flist_reader(flist)` to solve the following problem:
flist format: impath label\nimpath label\n ...(same to caffe's filelist)
Here is the function:
def default_flist_reader(flist):
"""
flist format: impath label\nimpath label\n ...(same to caffe's filelist)
"""
imlist = []
with open(flist, 'r') as rf:
for line in rf.readlines():
impath = line.strip()
imlist.append(impath)
return imlist | flist format: impath label\nimpath label\n ...(same to caffe's filelist) |
3,298 | import os.path
from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
import numpy as np
import json
import torch
from scipy.io import loadmat, savemat
import pickle
from util.preprocess import align_img, estimate_norm
from util.load_mats import load_lm3d
def jason_flist_reader(flist):
with open(flist, 'r') as fp:
info = json.load(fp)
return info | null |
3,299 | import os.path
from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
import numpy as np
import json
import torch
from scipy.io import loadmat, savemat
import pickle
from util.preprocess import align_img, estimate_norm
from util.load_mats import load_lm3d
def parse_label(label):
return torch.tensor(np.array(label).astype(np.float32)) | null |
3,300 | import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
def get_transform(grayscale=False):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
transform_list += [transforms.ToTensor()]
return transforms.Compose(transform_list) | null |
3,301 | import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
def get_affine_mat(opt, size):
shift_x, shift_y, scale, rot_angle, flip = 0., 0., 1., 0., False
w, h = size
if 'shift' in opt.preprocess:
shift_pixs = int(opt.shift_pixs)
shift_x = random.randint(-shift_pixs, shift_pixs)
shift_y = random.randint(-shift_pixs, shift_pixs)
if 'scale' in opt.preprocess:
scale = 1 + opt.scale_delta * (2 * random.random() - 1)
if 'rot' in opt.preprocess:
rot_angle = opt.rot_angle * (2 * random.random() - 1)
rot_rad = -rot_angle * np.pi/180
if 'flip' in opt.preprocess:
flip = random.random() > 0.5
shift_to_origin = np.array([1, 0, -w//2, 0, 1, -h//2, 0, 0, 1]).reshape([3, 3])
flip_mat = np.array([-1 if flip else 1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3])
shift_mat = np.array([1, 0, shift_x, 0, 1, shift_y, 0, 0, 1]).reshape([3, 3])
rot_mat = np.array([np.cos(rot_rad), np.sin(rot_rad), 0, -np.sin(rot_rad), np.cos(rot_rad), 0, 0, 0, 1]).reshape([3, 3])
scale_mat = np.array([scale, 0, 0, 0, scale, 0, 0, 0, 1]).reshape([3, 3])
shift_to_center = np.array([1, 0, w//2, 0, 1, h//2, 0, 0, 1]).reshape([3, 3])
affine = shift_to_center @ scale_mat @ rot_mat @ shift_mat @ flip_mat @ shift_to_origin
affine_inv = np.linalg.inv(affine)
return affine, affine_inv, flip | null |
3,302 | import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
def apply_img_affine(img, affine_inv, method=Image.BICUBIC):
return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.BICUBIC) | null |
3,303 | import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
def apply_lm_affine(landmark, affine, flip, size):
_, h = size
lm = landmark.copy()
lm[:, 1] = h - 1 - lm[:, 1]
lm = np.concatenate((lm, np.ones([lm.shape[0], 1])), -1)
lm = lm @ np.transpose(affine)
lm[:, :2] = lm[:, :2] / lm[:, 2:]
lm = lm[:, :2]
lm[:, 1] = h - 1 - lm[:, 1]
if flip:
lm_ = lm.copy()
lm_[:17] = lm[16::-1]
lm_[17:22] = lm[26:21:-1]
lm_[22:27] = lm[21:16:-1]
lm_[31:36] = lm[35:30:-1]
lm_[36:40] = lm[45:41:-1]
lm_[40:42] = lm[47:45:-1]
lm_[42:46] = lm[39:35:-1]
lm_[46:48] = lm[41:39:-1]
lm_[48:55] = lm[54:47:-1]
lm_[55:60] = lm[59:54:-1]
lm_[60:65] = lm[64:59:-1]
lm_[65:68] = lm[67:64:-1]
lm = lm_
return lm | null |
3,304 | import numpy as np
import torch.utils.data as data
from PIL import Image
import os
import os.path
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))] | null |
3,305 | import numpy as np
import torch.utils.data as data
from PIL import Image
import os
import os.path
def default_loader(path):
return Image.open(path).convert('RGB') | null |
3,306 | import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
def resize_n_crop(image, M, dsize=112):
# image: (b, c, h, w)
# M : (b, 2, 3)
return warp_affine(image, M, dsize=(dsize, dsize), align_corners=True) | null |
3,307 | import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
def perceptual_loss(id_featureA, id_featureB):
cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
# assert torch.sum((cosine_d > 1).float()) == 0
return torch.sum(1 - cosine_d) / cosine_d.shape[0] | null |
3,308 | import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `photo_loss` function. Write a Python function `def photo_loss(imageA, imageB, mask, eps=1e-6)` to solve the following problem:
l2 norm (with sqrt, to ensure backward stabililty, use eps, otherwise Nan may occur) Parameters: imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order imageB --same as imageA
Here is the function:
def photo_loss(imageA, imageB, mask, eps=1e-6):
"""
l2 norm (with sqrt, to ensure backward stabililty, use eps, otherwise Nan may occur)
Parameters:
imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order
imageB --same as imageA
"""
loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdims=True)) * mask
loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device))
return loss | l2 norm (with sqrt, to ensure backward stabililty, use eps, otherwise Nan may occur) Parameters: imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order imageB --same as imageA |
3,309 | import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `landmark_loss` function. Write a Python function `def landmark_loss(predict_lm, gt_lm, weight=None)` to solve the following problem:
weighted mse loss Parameters: predict_lm --torch.tensor (B, 68, 2) gt_lm --torch.tensor (B, 68, 2) weight --numpy.array (1, 68)
Here is the function:
def landmark_loss(predict_lm, gt_lm, weight=None):
"""
weighted mse loss
Parameters:
predict_lm --torch.tensor (B, 68, 2)
gt_lm --torch.tensor (B, 68, 2)
weight --numpy.array (1, 68)
"""
if not weight:
weight = np.ones([68])
weight[28:31] = 20
weight[-8:] = 20
weight = np.expand_dims(weight, 0)
weight = torch.tensor(weight).to(predict_lm.device)
loss = torch.sum((predict_lm - gt_lm)**2, dim=-1) * weight
loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1])
return loss | weighted mse loss Parameters: predict_lm --torch.tensor (B, 68, 2) gt_lm --torch.tensor (B, 68, 2) weight --numpy.array (1, 68) |
3,310 | import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `reg_loss` function. Write a Python function `def reg_loss(coeffs_dict, opt=None)` to solve the following problem:
l2 norm without the sqrt, from yu's implementation (mse) tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss Parameters: coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans
Here is the function:
def reg_loss(coeffs_dict, opt=None):
"""
l2 norm without the sqrt, from yu's implementation (mse)
tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss
Parameters:
coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans
"""
# coefficient regularization to ensure plausible 3d faces
if opt:
w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex
else:
w_id, w_exp, w_tex = 1, 1, 1, 1
creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \
w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \
w_tex * torch.sum(coeffs_dict['tex'] ** 2)
creg_loss = creg_loss / coeffs_dict['id'].shape[0]
# gamma regularization to ensure a nearly-monochromatic light
gamma = coeffs_dict['gamma'].reshape([-1, 3, 9])
gamma_mean = torch.mean(gamma, dim=1, keepdims=True)
gamma_loss = torch.mean((gamma - gamma_mean) ** 2)
return creg_loss, gamma_loss | l2 norm without the sqrt, from yu's implementation (mse) tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss Parameters: coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans |
3,311 | import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `reflectance_loss` function. Write a Python function `def reflectance_loss(texture, mask)` to solve the following problem:
minimize texture variance (mse), albedo regularization to ensure an uniform skin albedo Parameters: texture --torch.tensor, (B, N, 3) mask --torch.tensor, (N), 1 or 0
Here is the function:
def reflectance_loss(texture, mask):
"""
minimize texture variance (mse), albedo regularization to ensure an uniform skin albedo
Parameters:
texture --torch.tensor, (B, N, 3)
mask --torch.tensor, (N), 1 or 0
"""
mask = mask.reshape([1, mask.shape[0], 1])
texture_mean = torch.sum(mask * texture, dim=1, keepdims=True) / torch.sum(mask)
loss = torch.sum(((texture - texture_mean) * mask)**2) / (texture.shape[0] * torch.sum(mask))
return loss | minimize texture variance (mse), albedo regularization to ensure an uniform skin albedo Parameters: texture --torch.tensor, (B, N, 3) mask --torch.tensor, (N), 1 or 0 |
3,312 | import torch.nn as nn
from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Sequential, Module
import torch
class MobileFaceNet(Module):
def __init__(self, fp16=False, num_features=512):
super(MobileFaceNet, self).__init__()
scale = 2
self.fp16 = fp16
self.layers = nn.Sequential(
ConvBlock(3, 64 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1)),
ConvBlock(64 * scale, 64 * scale, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64),
DepthWise(64 * scale, 64 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128),
Residual(64 * scale, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1)),
DepthWise(64 * scale, 128 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256),
Residual(128 * scale, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)),
DepthWise(128 * scale, 128 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512),
Residual(128 * scale, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)),
)
self.conv_sep = ConvBlock(128 * scale, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))
self.features = GDC(num_features)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
with torch.cuda.amp.autocast(self.fp16):
x = self.layers(x)
x = self.conv_sep(x.float() if self.fp16 else x)
x = self.features(x)
return x
def get_mbf(fp16, num_features):
return MobileFaceNet(fp16, num_features) | null |
3,313 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation) | 3x3 convolution with padding |
3,314 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False) | 1x1 convolution |
3,315 | import torch
from torch import nn
class IBasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
def forward(self, x):
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs) | null |
3,316 | import torch
from torch import nn
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs) | null |
3,317 | import torch
from torch import nn
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs) | null |
3,318 | import torch
from torch import nn
class IBasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
def forward(self, x):
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs) | null |
3,319 | import torch
from torch import nn
class IBasicBlock(nn.Module):
    """Improved ResNet basic block: BN-conv-BN-PReLU-conv-BN with identity shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1):
        super(IBasicBlock, self).__init__()
        # Only the vanilla configuration is implemented.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
        self.conv1 = conv3x3(inplanes, planes)
        self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
        self.prelu = nn.PReLU(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Apply the main branch, then add the (possibly projected) shortcut."""
        out = x
        for layer in (self.bn1, self.conv1, self.bn2,
                      self.prelu, self.conv2, self.bn3):
            out = layer(out)
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return out
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build an IResNet backbone; pretrained weights are not available."""
    backbone = IResNet(block, layers, **kwargs)
    if pretrained:
        raise ValueError()
    return backbone
def iresnet200(pretrained=False, progress=True, **kwargs):
    """IResNet-200 backbone (stage depths 6/26/60/6)."""
    return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6],
                    pretrained, progress, **kwargs)
3,320 | import torch
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (padding == dilation keeps size at stride 1)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=dilation,
        groups=groups, dilation=dilation, bias=False,
    )
3,321 | import torch
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
3,322 | import torch
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
class IBasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
def forward(self, x):
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
def iresnet2060(pretrained=False, progress=True, **kwargs):
    """Extremely deep IResNet-2060 backbone (stage depths 3/128/896/3)."""
    return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3],
                    pretrained, progress, **kwargs)
3,323 | import argparse
import cv2
import numpy as np
import torch
from backbones import get_model
def get_model(name, **kwargs):
    """Build the backbone registered under the short name `name`.

    Raises ValueError for unknown names.
    """
    if name == "r18":
        return iresnet18(False, **kwargs)
    if name == "r34":
        return iresnet34(False, **kwargs)
    if name == "r50":
        return iresnet50(False, **kwargs)
    if name == "r100":
        return iresnet100(False, **kwargs)
    if name == "r200":
        return iresnet200(False, **kwargs)
    if name == "r2060":
        # Imported lazily: the 2060-layer variant is rarely needed and large.
        from .iresnet2060 import iresnet2060
        return iresnet2060(False, **kwargs)
    if name == "mbf":
        return get_mbf(fp16=kwargs.get("fp16", False),
                       num_features=kwargs.get("num_features", 512))
    raise ValueError()
def inference(weight, name, img):
    """Embed one face image with the named backbone and print the feature.

    `img` is a file path; when None, random noise of the expected input
    size is used instead.
    """
    if img is None:
        img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
    else:
        img = cv2.imread(img)
        img = cv2.resize(img, (112, 112))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # HWC -> CHW, add a batch axis, then map [0, 255] to [-1, 1] in place.
    img = torch.from_numpy(np.transpose(img, (2, 0, 1))).unsqueeze(0).float()
    img.div_(255).sub_(0.5).div_(0.5)
    net = get_model(name, fp16=False)
    net.load_state_dict(torch.load(weight))
    net.eval()
    print(net(img).numpy())
3,324 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
class ArcFaceORT:
    """ONNX-Runtime wrapper that validates and benchmarks an ArcFace model.

    `check()` enforces the MFR-challenge track constraints (model size,
    feature dim, latency) and prepares the session; `forward`/`benchmark`
    then run inference on aligned face crops.
    """
    def __init__(self, model_path, cpu=False):
        # Directory containing the .onnx file(s), not the file itself.
        self.model_path = model_path
        # providers = None will use available provider, for onnxruntime-gpu it will be "CUDAExecutionProvider"
        self.providers = ['CPUExecutionProvider'] if cpu else None
    #input_size is (w,h), return error message, return None if success
    def check(self, track='cfat', test_img = None):
        """Validate the model against `track` limits; returns an error string or None."""
        #default is cfat
        max_model_size_mb=1024
        max_feat_dim=512
        max_time_cost=15
        if track.startswith('ms1m'):
            max_model_size_mb=1024
            max_feat_dim=512
            max_time_cost=10
        elif track.startswith('glint'):
            max_model_size_mb=1024
            max_feat_dim=1024
            max_time_cost=20
        elif track.startswith('cfat'):
            max_model_size_mb = 1024
            max_feat_dim = 512
            max_time_cost = 15
        elif track.startswith('unconstrained'):
            max_model_size_mb=1024
            max_feat_dim=1024
            max_time_cost=30
        else:
            return "track not found"
        if not os.path.exists(self.model_path):
            return "model_path not exists"
        if not os.path.isdir(self.model_path):
            return "model_path should be directory"
        onnx_files = []
        for _file in os.listdir(self.model_path):
            if _file.endswith('.onnx'):
                onnx_files.append(osp.join(self.model_path, _file))
        if len(onnx_files)==0:
            return "do not have onnx files"
        # When several .onnx files exist, the lexicographically last one wins.
        self.model_file = sorted(onnx_files)[-1]
        print('use onnx-model:', self.model_file)
        try:
            session = onnxruntime.InferenceSession(self.model_file, providers=self.providers)
        except:
            return "load onnx failed"
        input_cfg = session.get_inputs()[0]
        input_shape = input_cfg.shape
        print('input-shape:', input_shape)
        if len(input_shape)!=4:
            return "length of input_shape should be 4"
        if not isinstance(input_shape[0], str):
            # Fixed batch dim: rewrite the graph with a dynamic batch axis
            # and reload, so batched inference works.
            #return "input_shape[0] should be str to support batch-inference"
            print('reset input-shape[0] to None')
            model = onnx.load(self.model_file)
            model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
            new_model_file = osp.join(self.model_path, 'zzzzrefined.onnx')
            onnx.save(model, new_model_file)
            self.model_file = new_model_file
            print('use new onnx-model:', self.model_file)
            try:
                session = onnxruntime.InferenceSession(self.model_file, providers=self.providers)
            except:
                return "load onnx failed"
            input_cfg = session.get_inputs()[0]
            input_shape = input_cfg.shape
            print('new-input-shape:', input_shape)
        # Model shape is NCHW; image_size is stored as (w, h).
        self.image_size = tuple(input_shape[2:4][::-1])
        #print('image_size:', self.image_size)
        input_name = input_cfg.name
        outputs = session.get_outputs()
        output_names = []
        for o in outputs:
            output_names.append(o.name)
            #print(o.name, o.shape)
        if len(output_names)!=1:
            return "number of output nodes should be 1"
        self.session = session
        self.input_name = input_name
        self.output_names = output_names
        #print(self.output_names)
        model = onnx.load(self.model_file)
        graph = model.graph
        if len(graph.node)<8:
            return "too small onnx graph"
        input_size = (112,112)
        self.crop = None
        if track=='cfat':
            # Optional crop.txt: x1 y1 x2 y2 w h describing a pre-crop region.
            crop_file = osp.join(self.model_path, 'crop.txt')
            if osp.exists(crop_file):
                lines = open(crop_file,'r').readlines()
                if len(lines)!=6:
                    return "crop.txt should contain 6 lines"
                lines = [int(x) for x in lines]
                self.crop = lines[:4]
                input_size = tuple(lines[4:6])
        if input_size!=self.image_size:
            return "input-size is inconsistant with onnx model input, %s vs %s"%(input_size, self.image_size)
        self.model_size_mb = os.path.getsize(self.model_file) / float(1024*1024)
        if self.model_size_mb > max_model_size_mb:
            return "max model size exceed, given %.3f-MB"%self.model_size_mb
        input_mean = None
        input_std = None
        if track=='cfat':
            # Optional pixel_norm.txt: explicit mean and std, one per line.
            pn_file = osp.join(self.model_path, 'pixel_norm.txt')
            if osp.exists(pn_file):
                lines = open(pn_file,'r').readlines()
                if len(lines)!=2:
                    return "pixel_norm.txt should contain 2 lines"
                input_mean = float(lines[0])
                input_std = float(lines[1])
        if input_mean is not None or input_std is not None:
            if input_mean is None or input_std is None:
                return "please set input_mean and input_std simultaneously"
        else:
            # No explicit normalization: infer it from the first graph nodes.
            # A leading Sub+Mul/Div pair means the model normalizes internally.
            find_sub = False
            find_mul = False
            for nid, node in enumerate(graph.node[:8]):
                print(nid, node.name)
                if node.name.startswith('Sub') or node.name.startswith('_minus'):
                    find_sub = True
                if node.name.startswith('Mul') or node.name.startswith('_mul') or node.name.startswith('Div'):
                    find_mul = True
            if find_sub and find_mul:
                print("find sub and mul")
                #mxnet arcface model
                input_mean = 0.0
                input_std = 1.0
            else:
                input_mean = 127.5
                input_std = 127.5
        self.input_mean = input_mean
        self.input_std = input_std
        # Reject quantized weights: every initializer must be >= 4 bytes/elem.
        for initn in graph.initializer:
            weight_array = numpy_helper.to_array(initn)
            dt = weight_array.dtype
            if dt.itemsize<4:
                return 'invalid weight type - (%s:%s)' % (initn.name, dt.name)
        if test_img is None:
            test_img = get_image('Tom_Hanks_54745')
            test_img = cv2.resize(test_img, self.image_size)
        else:
            test_img = cv2.resize(test_img, self.image_size)
        feat, cost = self.benchmark(test_img)
        batch_result = self.check_batch(test_img)
        batch_result_sum = float(np.sum(batch_result))
        # `x != x` is the NaN test; also reject +/- infinity.
        if batch_result_sum in [float('inf'), -float('inf')] or batch_result_sum != batch_result_sum:
            print(batch_result)
            print(batch_result_sum)
            return "batch result output contains NaN!"
        if len(feat.shape) < 2:
            return "the shape of the feature must be two, but get {}".format(str(feat.shape))
        if feat.shape[1] > max_feat_dim:
            return "max feat dim exceed, given %d"%feat.shape[1]
        self.feat_dim = feat.shape[1]
        cost_ms = cost*1000
        if cost_ms>max_time_cost:
            return "max time cost exceed, given %.4f"%cost_ms
        self.cost_ms = cost_ms
        print('check stat:, model-size-mb: %.4f, feat-dim: %d, time-cost-ms: %.4f, input-mean: %.3f, input-std: %.3f'%(self.model_size_mb, self.feat_dim, self.cost_ms, self.input_mean, self.input_std))
        return None
    def check_batch(self, img):
        """Run a 32-image batch built from `img`; returns the raw network output."""
        # NOTE(review): when `img` is already a list, `imgs` is never bound and
        # the crop/blob code below would raise NameError — confirm callers
        # always pass a single image.
        if not isinstance(img, list):
            imgs = [img, ] * 32
        if self.crop is not None:
            nimgs = []
            for img in imgs:
                nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :]
                if nimg.shape[0] != self.image_size[1] or nimg.shape[1] != self.image_size[0]:
                    nimg = cv2.resize(nimg, self.image_size)
                nimgs.append(nimg)
            imgs = nimgs
        blob = cv2.dnn.blobFromImages(
            images=imgs, scalefactor=1.0 / self.input_std, size=self.image_size,
            mean=(self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out
    def meta_info(self):
        """Summary of the stats collected by check()."""
        return {'model-size-mb':self.model_size_mb, 'feature-dim':self.feat_dim, 'infer': self.cost_ms}
    def forward(self, imgs):
        """Embed one image or a list of images; returns the raw network output."""
        if not isinstance(imgs, list):
            imgs = [imgs]
        input_size = self.image_size
        if self.crop is not None:
            nimgs = []
            for img in imgs:
                nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:]
                if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]:
                    nimg = cv2.resize(nimg, input_size)
                nimgs.append(nimg)
            imgs = nimgs
        blob = cv2.dnn.blobFromImages(imgs, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name : blob})[0]
        return net_out
    def benchmark(self, img):
        """Time single-image inference over 50 runs; returns (output, 6th-fastest seconds)."""
        input_size = self.image_size
        if self.crop is not None:
            nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:]
            if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]:
                nimg = cv2.resize(nimg, input_size)
            img = nimg
        blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        costs = []
        for _ in range(50):
            ta = datetime.datetime.now()
            net_out = self.session.run(self.output_names, {self.input_name : blob})[0]
            tb = datetime.datetime.now()
            cost = (tb-ta).total_seconds()
            costs.append(cost)
        costs = sorted(costs)
        # Take a low quantile (6th of 50) to ignore warm-up jitter.
        cost = costs[5]
        return net_out, cost
def extract(model_root, dataset):
    """Run the ONNX model over `dataset`; returns an (N, 2*feat_dim) matrix."""
    model = ArcFaceORT(model_path=model_root)
    model.check()

    def batchify_fn(data):
        # Stack per-sample NDArrays into one batch along axis 0.
        return mx.nd.concat(*data, dim=0)

    loader = mx.gluon.data.DataLoader(
        dataset, 128, last_batch='keep', num_workers=4,
        thread_pool=True, prefetch=16, batchify_fn=batchify_fn)
    feat_mat = np.zeros(shape=(len(dataset), 2 * model.feat_dim))
    for it, batch in enumerate(loader):
        batch = batch.asnumpy()
        # Apply the model's pixel normalization on the host side.
        batch = (batch - model.input_mean) / model.input_std
        feat = model.session.run(model.output_names, {model.input_name: batch})[0]
        feat = np.reshape(feat, (-1, model.feat_dim * 2))
        feat_mat[128 * it: 128 * it + feat.shape[0], :] = feat
        if (it + 1) % 50 == 0:
            print(it + 1)
    return feat_mat
return feat_mat | null |
3,325 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
def read_template_media_list(path):
    """Parse an IJB meta file into (template_ids, media_ids) int arrays.

    Each space-separated row is: <image_name> <template_id> <media_id>.
    """
    ijb_meta = pd.read_csv(path, sep=' ', header=None).values
    # astype(int): the deprecated np.int alias was removed in NumPy 1.24;
    # the builtin int is the documented equivalent.
    templates = ijb_meta[:, 1].astype(int)
    medias = ijb_meta[:, 2].astype(int)
    return templates, medias
3,326 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
def read_template_pair_list(path):
    """Parse an IJB pair file into (t1, t2, label) int arrays.

    Each space-separated row is: <template_id_1> <template_id_2> <label>.
    """
    pairs = pd.read_csv(path, sep=' ', header=None).values
    # astype(int): np.int was removed in NumPy 1.24; int is equivalent.
    t1 = pairs[:, 0].astype(int)
    t2 = pairs[:, 1].astype(int)
    label = pairs[:, 2].astype(int)
    return t1, t2, label
3,327 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
def read_image_feature(path):
    """Load a pickled image-feature object from `path`."""
    with open(path, 'rb') as fid:
        return pickle.load(fid)
3,328 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
def image2template_feature(img_feats=None,
                           templates=None,
                           medias=None):
    """Aggregate per-image features into one L2-normalized feature per template.

    img_feats: (num_images, dim) array of per-image embeddings.
    templates: per-image template id, same length as img_feats.
    medias:    per-image media id; frames of one media (video) are averaged
               into a single feature before summing over the template.
    Returns (template_norm_feats, unique_templates).
    """
    unique_templates = np.unique(templates)
    template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
    for count_template, uqt in enumerate(unique_templates):
        (ind_t,) = np.where(templates == uqt)
        face_norm_feats = img_feats[ind_t]
        face_medias = medias[ind_t]
        unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True)
        media_norm_feats = []
        for u, ct in zip(unique_medias, unique_media_counts):
            (ind_m,) = np.where(face_medias == u)
            if ct == 1:
                media_norm_feats += [face_norm_feats[ind_m]]
            else: # image features from the same video will be aggregated into one feature
                media_norm_feats += [np.mean(face_norm_feats[ind_m], axis=0, keepdims=True), ]
        media_norm_feats = np.array(media_norm_feats)
        # (k, 1, dim) summed over axis 0 broadcasts into the (dim,) row.
        template_feats[count_template] = np.sum(media_norm_feats, axis=0)
        if count_template % 2000 == 0:
            print('Finish Calculating {} template features.'.format(
                count_template))
    # Row-wise L2 normalization (sklearn.preprocessing.normalize).
    template_norm_feats = normalize(template_feats)
    return template_norm_feats, unique_templates
3,329 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
def verification(template_norm_feats=None,
                 unique_templates=None,
                 p1=None,
                 p2=None):
    """Cosine-similarity score for each template pair (p1[i], p2[i])."""
    # Map template id -> row index into template_norm_feats.
    template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
    for row, tid in enumerate(unique_templates):
        template2id[tid] = row
    score = np.zeros((len(p1),))
    # Score in chunks of 100k pairs to bound peak memory.
    batchsize = 100000
    all_pairs = np.array(range(len(p1)))
    chunks = [all_pairs[i: i + batchsize] for i in range(0, len(p1), batchsize)]
    for c, s in enumerate(chunks):
        feat1 = template_norm_feats[template2id[p1[s]]]
        feat2 = template_norm_feats[template2id[p2[s]]]
        score[s] = np.sum(feat1 * feat2, -1).flatten()
        if c % 10 == 0:
            print('Finish {}/{} pairs.'.format(c, len(chunks)))
    return score
3,330 | import argparse
import os
import pickle
import timeit
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import prettytable
import skimage.transform
from sklearn.metrics import roc_curve
from sklearn.preprocessing import normalize
from onnx_helper import ArcFaceORT
def verification2(template_norm_feats=None,
                  unique_templates=None,
                  p1=None,
                  p2=None):
    """Cosine-similarity score per template pair (memory-bounded variant)."""
    # Template id -> row index lookup table.
    template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
    for idx, tid in enumerate(unique_templates):
        template2id[tid] = idx
    score = np.zeros((len(p1),))  # save cosine distance between pairs
    # small batchsize instead of all pairs in one batch due to the memory limiation
    batchsize = 100000
    pair_ids = np.array(range(len(p1)))
    sublists = [pair_ids[i:i + batchsize] for i in range(0, len(p1), batchsize)]
    for c, s in enumerate(sublists):
        f1 = template_norm_feats[template2id[p1[s]]]
        f2 = template_norm_feats[template2id[p2[s]]]
        score[s] = np.sum(f1 * f2, -1).flatten()
        if c % 10 == 0:
            print('Finish {}/{} pairs.'.format(c, len(sublists)))
    return score
3,331 | import torch
from torch import nn
class CosFace(nn.Module):
    """Additive-cosine-margin head: subtract m at the target logit, scale by s."""

    def __init__(self, s=64.0, m=0.40):
        super(CosFace, self).__init__()
        self.s = s  # logit scale
        self.m = m  # cosine margin

    def forward(self, cosine, label):
        # Rows labelled -1 carry no identity and receive no margin.
        valid = torch.where(label != -1)[0]
        margin = torch.zeros(valid.size()[0], cosine.size()[1], device=cosine.device)
        margin.scatter_(1, label[valid, None], self.m)
        cosine[valid] -= margin
        return cosine * self.s
class ArcFace(nn.Module):
    """Additive-angular-margin head: add m to the target angle, scale by s."""

    def __init__(self, s=64.0, m=0.5):
        super(ArcFace, self).__init__()
        self.s = s  # logit scale
        self.m = m  # angular margin in radians

    def forward(self, cosine: torch.Tensor, label):
        valid = torch.where(label != -1)[0]
        margin = torch.zeros(valid.size()[0], cosine.size()[1], device=cosine.device)
        margin.scatter_(1, label[valid, None], self.m)
        # Work in angle space, all in place: acos -> add margin -> cos -> scale.
        cosine.acos_()
        cosine[valid] += margin
        cosine.cos_().mul_(self.s)
        return cosine
def get_loss(name):
    """Return the margin-loss module registered under `name`.

    Raises ValueError for unknown names.
    """
    if name == "cosface":
        return CosFace()
    if name == "arcface":
        return ArcFace()
    raise ValueError()
3,332 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
for i in range(n)]
for i, e in enumerate(listTemp):
twoList[i % n].append(e)
    return twoList
def divideIntoNstrand(listTemp, n):
    """Deal `listTemp` round-robin into n strands: item i goes to strand i % n."""
    strands = [[] for _ in range(n)]
    for idx, item in enumerate(listTemp):
        strands[idx % n].append(item)
    return strands
3,333 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
ijb_meta = pd.read_csv(path, sep=' ', header=None).values
templates = ijb_meta[:, 1].astype(np.int)
medias = ijb_meta[:, 2].astype(np.int)
    return templates, medias
templates, medias = read_template_media_list(
os.path.join('%s/meta' % image_path,
'%s_face_tid_mid.txt' % target.lower()))
np.save(score_save_file, score)
def read_template_media_list(path):
    """Parse the IJB meta file: one `<image> <template_id> <media_id>` row per line.

    Returns (templates, medias) as int arrays aligned with the image order.
    """
    ijb_meta = pd.read_csv(path, sep=' ', header=None).values
    # np.int was removed in NumPy 1.24; the builtin int is equivalent.
    templates = ijb_meta[:, 1].astype(int)
    medias = ijb_meta[:, 2].astype(int)
    return templates, medias
3,334 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
pairs = pd.read_csv(path, sep=' ', header=None).values
t1 = pairs[:, 0].astype(np.int)
t2 = pairs[:, 1].astype(np.int)
label = pairs[:, 2].astype(np.int)
    return t1, t2, label
np.save(score_save_file, score)
def read_template_pair_list(path):
    """Parse the IJB pair file: one `<t1> <t2> <label>` row per line.

    Returns (t1, t2, label) as int arrays.
    """
    pairs = pd.read_csv(path, sep=' ', header=None).values
    # np.int was removed in NumPy 1.24; the builtin int is equivalent.
    t1 = pairs[:, 0].astype(int)
    t2 = pairs[:, 1].astype(int)
    label = pairs[:, 2].astype(int)
    return t1, t2, label
3,335 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
with open(path, 'rb') as fid:
img_feats = pickle.load(fid)
return img_feat
img_feats = np.empty((len(files), 1024), dtype=np.float32)
return img_feats, faceness_score
with open(path, 'rb') as fid:
img_feats = pickle.load(fid)
return img_feat
img_feats, faceness_scores = get_image_feature(img_path, files_list,
model_path, 0, gpu_id)
def read_image_feature(path):
    """Unpickle and return the image-feature matrix stored at `path`."""
    with open(path, 'rb') as fh:
        feats = pickle.load(fh)
    return feats
3,336 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
args = parser.parse_args()
batch_size = args.batch_size
class Embedding(object):
    """Face-embedding extractor: aligns each crop to a 5-point template and
    embeds the image together with its horizontal flip (feature dim doubles).

    NOTE(review): reads module-level `args.network` and requires CUDA —
    confirm args are parsed before construction.
    """
    def __init__(self, prefix, data_shape, batch_size=1):
        image_size = (112, 112)
        self.image_size = image_size
        # `prefix` is the checkpoint path for the backbone weights.
        weight = torch.load(prefix)
        resnet = get_model(args.network, dropout=0, fp16=False).cuda()
        resnet.load_state_dict(weight)
        model = torch.nn.DataParallel(resnet)
        self.model = model
        self.model.eval()
        # Canonical 5-point landmark template for 112x112 face alignment.
        src = np.array([
            [30.2946, 51.6963],
            [65.5318, 51.5014],
            [48.0252, 71.7366],
            [33.5493, 92.3655],
            [62.7299, 92.2041]], dtype=np.float32)
        src[:, 0] += 8.0
        self.src = src
        self.batch_size = batch_size
        self.data_shape = data_shape
    def get(self, rimg, landmark):
        """Align `rimg` via similarity transform to the landmark template.

        Returns a (2, 3, H, W) uint8 blob: aligned image and its mirror.
        """
        assert landmark.shape[0] == 68 or landmark.shape[0] == 5
        assert landmark.shape[1] == 2
        if landmark.shape[0] == 68:
            # Reduce 68-point landmarks to the 5-point convention
            # (eye centers, nose tip, mouth corners).
            landmark5 = np.zeros((5, 2), dtype=np.float32)
            landmark5[0] = (landmark[36] + landmark[39]) / 2
            landmark5[1] = (landmark[42] + landmark[45]) / 2
            landmark5[2] = landmark[30]
            landmark5[3] = landmark[48]
            landmark5[4] = landmark[54]
        else:
            landmark5 = landmark
        tform = trans.SimilarityTransform()
        tform.estimate(landmark5, self.src)
        M = tform.params[0:2, :]
        img = cv2.warpAffine(rimg,
                             M, (self.image_size[1], self.image_size[0]),
                             borderValue=0.0)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_flip = np.fliplr(img)
        img = np.transpose(img, (2, 0, 1))  # 3*112*112, RGB
        img_flip = np.transpose(img_flip, (2, 0, 1))
        input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8)
        input_blob[0] = img
        input_blob[1] = img_flip
        return input_blob
    def forward_db(self, batch_data):
        """Embed a (2*batch, C, H, W) blob; returns a (batch, 2*feat_dim) numpy array."""
        imgs = torch.Tensor(batch_data).cuda()
        # Map [0, 255] pixels to [-1, 1] in place.
        imgs.div_(255).sub_(0.5).div_(0.5)
        feat = self.model(imgs)
        feat = feat.reshape([self.batch_size, 2 * feat.shape[1]])
        return feat.cpu().numpy()
return img_feat
batch_size = args.batch_size
data_shape = (3, 112, 112)
files = files_list
print('files:', len(files))
rare_size = len(files) % batch_size
faceness_scores = []
batch = 0
img_feats = np.empty((len(files), 1024), dtype=np.float32)
batch_data = np.empty((2 * batch_size, 3, 112, 112))
embedding = Embedding(model_path, data_shape, batch_size)
for img_index, each_line in enumerate(files[:len(files) - rare_size]):
name_lmk_score = each_line.strip().split(' ')
img_name = os.path.join(img_path, name_lmk_score[0])
img = cv2.imread(img_name)
lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
dtype=np.float32)
lmk = lmk.reshape((5, 2))
input_blob = embedding.get(img, lmk)
batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0]
batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1]
if (img_index + 1) % batch_size == 0:
print('batch', batch)
img_feats[batch * batch_size:batch * batch_size +
batch_size][:] = embedding.forward_db(batch_data)
batch += 1
faceness_scores.append(name_lmk_score[-1])
batch_data = np.empty((2 * rare_size, 3, 112, 112))
embedding = Embedding(model_path, data_shape, rare_size)
for img_index, each_line in enumerate(files[len(files) - rare_size:]):
name_lmk_score = each_line.strip().split(' ')
img_name = os.path.join(img_path, name_lmk_score[0])
img = cv2.imread(img_name)
lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
dtype=np.float32)
lmk = lmk.reshape((5, 2))
input_blob = embedding.get(img, lmk)
batch_data[2 * img_index][:] = input_blob[0]
batch_data[2 * img_index + 1][:] = input_blob[1]
if (img_index + 1) % rare_size == 0:
print('batch', batch)
img_feats[len(files) -
rare_size:][:] = embedding.forward_db(batch_data)
batch += 1
faceness_scores.append(name_lmk_score[-1])
faceness_scores = np.array(faceness_scores).astype(np.float32)
return img_feats, faceness_score
return img_feat
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
files = img_list.readlines()
img_feats, faceness_scores = get_image_feature(img_path, files_list,
model_path, 0, gpu_id)
print('Time: %.2f s. ' % (stop - start))
print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],
img_feats.shape[1]))
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
if not os.path.exists(save_path):
os.makedirs(save_path)
np.save(score_save_file, score)
files = [score_save_file]
print(tpr_fpr_table)
def get_image_feature(img_path, files_list, model_path, epoch, gpu_id):
    """Extract flip-concatenated features for every entry of `files_list`.

    Each list line is `<relative_image_path> <10 landmark floats> <faceness>`.
    Returns (img_feats, faceness_scores): an (N, 1024) float32 matrix plus the
    per-image detector scores.
    NOTE(review): reads module-level `args.batch_size`; `epoch` and `gpu_id`
    are unused — confirm they are kept only for API compatibility.
    """
    batch_size = args.batch_size
    data_shape = (3, 112, 112)
    files = files_list
    print('files:', len(files))
    # The tail that does not fill a whole batch is processed separately below.
    rare_size = len(files) % batch_size
    faceness_scores = []
    batch = 0
    img_feats = np.empty((len(files), 1024), dtype=np.float32)
    batch_data = np.empty((2 * batch_size, 3, 112, 112))
    embedding = Embedding(model_path, data_shape, batch_size)
    for img_index, each_line in enumerate(files[:len(files) - rare_size]):
        name_lmk_score = each_line.strip().split(' ')
        img_name = os.path.join(img_path, name_lmk_score[0])
        img = cv2.imread(img_name)
        lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
                       dtype=np.float32)
        lmk = lmk.reshape((5, 2))
        input_blob = embedding.get(img, lmk)
        # Even slots hold the aligned image, odd slots its horizontal flip.
        batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0]
        batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1]
        if (img_index + 1) % batch_size == 0:
            print('batch', batch)
            img_feats[batch * batch_size:batch * batch_size +
                      batch_size][:] = embedding.forward_db(batch_data)
            batch += 1
        faceness_scores.append(name_lmk_score[-1])
    # Remaining files: rebuild the embedding with the exact tail batch size.
    batch_data = np.empty((2 * rare_size, 3, 112, 112))
    embedding = Embedding(model_path, data_shape, rare_size)
    for img_index, each_line in enumerate(files[len(files) - rare_size:]):
        name_lmk_score = each_line.strip().split(' ')
        img_name = os.path.join(img_path, name_lmk_score[0])
        img = cv2.imread(img_name)
        lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
                       dtype=np.float32)
        lmk = lmk.reshape((5, 2))
        input_blob = embedding.get(img, lmk)
        batch_data[2 * img_index][:] = input_blob[0]
        batch_data[2 * img_index + 1][:] = input_blob[1]
        if (img_index + 1) % rare_size == 0:
            print('batch', batch)
            img_feats[len(files) -
                      rare_size:][:] = embedding.forward_db(batch_data)
            batch += 1
        faceness_scores.append(name_lmk_score[-1])
    faceness_scores = np.array(faceness_scores).astype(np.float32)
    # img_feats = np.ones( (len(files), 1024), dtype=np.float32) * 0.01
    # faceness_scores = np.ones( (len(files), ), dtype=np.float32 )
    return img_feats, faceness_scores
3,337 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
print('files:', len(files))
unique_templates = np.unique(templates)
template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
for count_template, uqt in enumerate(unique_templates):
(ind_t,) = np.where(templates == uqt)
face_norm_feats = img_feats[ind_t]
face_medias = medias[ind_t]
unique_medias, unique_media_counts = np.unique(face_medias,
return_counts=True)
media_norm_feats = []
for u, ct in zip(unique_medias, unique_media_counts):
(ind_m,) = np.where(face_medias == u)
if ct == 1:
media_norm_feats += [face_norm_feats[ind_m]]
else: # image features from the same video will be aggregated into one feature
media_norm_feats += [
np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)
]
media_norm_feats = np.array(media_norm_feats)
# media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
template_feats[count_template] = np.sum(media_norm_feats, axis=0)
if count_template % 2000 == 0:
print('Finish Calculating {} template features.'.format(
count_template))
template_norm_feats = sklearn.preprocessing.normalize(template_feats)
return template_norm_feats, unique_template
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],
img_feats.shape[1]))
template_norm_feats, unique_templates = image2template_feature(
img_input_feats, templates, medias)
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
np.save(score_save_file, score)
print(tpr_fpr_table)
def image2template_feature(img_feats=None, templates=None, medias=None):
    """Aggregate per-image features into one L2-normalized feature per template.

    Frames of one media (video) are averaged first, then the per-media
    features of each template are summed and row-normalized.
    Returns (template_norm_feats, unique_templates).
    """
    # ==========================================================
    # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim]
    # 2. compute media feature.
    # 3. compute template feature.
    # ==========================================================
    unique_templates = np.unique(templates)
    template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
    for count_template, uqt in enumerate(unique_templates):
        (ind_t,) = np.where(templates == uqt)
        face_norm_feats = img_feats[ind_t]
        face_medias = medias[ind_t]
        unique_medias, unique_media_counts = np.unique(face_medias,
                                                       return_counts=True)
        media_norm_feats = []
        for u, ct in zip(unique_medias, unique_media_counts):
            (ind_m,) = np.where(face_medias == u)
            if ct == 1:
                media_norm_feats += [face_norm_feats[ind_m]]
            else: # image features from the same video will be aggregated into one feature
                media_norm_feats += [
                    np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)
                ]
        media_norm_feats = np.array(media_norm_feats)
        # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
        template_feats[count_template] = np.sum(media_norm_feats, axis=0)
        if count_template % 2000 == 0:
            print('Finish Calculating {} template features.'.format(
                count_template))
    # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True))
    template_norm_feats = sklearn.preprocessing.normalize(template_feats)
    # print(template_norm_feats.shape)
    return template_norm_feats, unique_templates
3,338 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
for i in range(n)]
for i, e in enumerate(listTemp):
twoList[i % n].append(e)
print('files:', len(files))
for count_template, uqt in enumerate(unique_templates):
(ind_t,) = np.where(templates == uqt)
face_norm_feats = img_feats[ind_t]
face_medias = medias[ind_t]
unique_medias, unique_media_counts = np.unique(face_medias,
return_counts=True)
media_norm_feats = []
for u, ct in zip(unique_medias, unique_media_counts):
(ind_m,) = np.where(face_medias == u)
if ct == 1:
media_norm_feats += [face_norm_feats[ind_m]]
else: # image features from the same video will be aggregated into one feature
media_norm_feats += [
np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)
]
media_norm_feats = np.array(media_norm_feats)
# media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
template_feats[count_template] = np.sum(media_norm_feats, axis=0)
if count_template % 2000 == 0:
print('Finish Calculating {} template features.'.format(
count_template))
template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
score = np.zeros((len(p1),))
total_pairs = np.array(range(len(p1)))
batchsize = 100000
sublists = [
total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
]
total_sublists = len(sublists)
for c, s in enumerate(sublists):
feat1 = template_norm_feats[template2id[p1[s]]]
feat2 = template_norm_feats[template2id[p2[s]]]
similarity_score = np.sum(feat1 * feat2, -1)
score[s] = similarity_score.flatten()
if c % 10 == 0:
print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score
template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
score = np.zeros((len(p1),))
total_pairs = np.array(range(len(p1)))
batchsize = 100000
sublists = [
total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
]
total_sublists = len(sublists)
for c, s in enumerate(sublists):
feat1 = template_norm_feats[template2id[p1[s]]]
feat2 = template_norm_feats[template2id[p2[s]]]
similarity_score = np.sum(feat1 * feat2, -1)
score[s] = similarity_score.flatten()
if c % 10 == 0:
print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],
img_feats.shape[1]))
print('Time: %.2f s. ' % (stop - start))
score = verification(template_norm_feats, unique_templates, p1, p2)
print('Time: %.2f s. ' % (stop - start))
np.save(score_save_file, score)
print(tpr_fpr_table)
def verification(template_norm_feats=None,
                 unique_templates=None,
                 p1=None,
                 p2=None):
    """Compute set-to-set similarity scores for template pairs.

    Args:
        template_norm_feats: (num_templates, feat_dim) L2-normalized features.
        unique_templates: template ids aligned with the rows of
            template_norm_feats.
        p1, p2: integer arrays of template ids forming the pairs to score.

    Returns:
        (len(p1),) array with one cosine similarity per pair.
    """
    # Dense lookup table: template id -> row index in template_norm_feats.
    lookup = np.zeros((max(unique_templates) + 1, 1), dtype=int)
    for row, template_id in enumerate(unique_templates):
        lookup[template_id] = row
    num_pairs = len(p1)
    score = np.zeros((num_pairs,))  # one cosine similarity per pair
    # Score in chunks rather than one giant batch to bound peak memory.
    batchsize = 100000
    all_indices = np.array(range(num_pairs))
    batches = [all_indices[start:start + batchsize]
               for start in range(0, num_pairs, batchsize)]
    n_batches = len(batches)
    for batch_no, idx in enumerate(batches):
        feats_a = template_norm_feats[lookup[p1[idx]]]
        feats_b = template_norm_feats[lookup[p2[idx]]]
        # Rows are unit-length, so the dot product is the cosine similarity.
        score[idx] = np.sum(feats_a * feats_b, -1).flatten()
        if batch_no % 10 == 0:
            print('Finish {}/{} pairs.'.format(batch_no, n_batches))
    return score
3,339 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
for i in range(n)]
for i, e in enumerate(listTemp):
twoList[i % n].append(e)
print('files:', len(files))
for count_template, uqt in enumerate(unique_templates):
(ind_t,) = np.where(templates == uqt)
face_norm_feats = img_feats[ind_t]
face_medias = medias[ind_t]
unique_medias, unique_media_counts = np.unique(face_medias,
return_counts=True)
media_norm_feats = []
for u, ct in zip(unique_medias, unique_media_counts):
(ind_m,) = np.where(face_medias == u)
if ct == 1:
media_norm_feats += [face_norm_feats[ind_m]]
else: # image features from the same video will be aggregated into one feature
media_norm_feats += [
np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)
]
media_norm_feats = np.array(media_norm_feats)
# media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
template_feats[count_template] = np.sum(media_norm_feats, axis=0)
if count_template % 2000 == 0:
print('Finish Calculating {} template features.'.format(
count_template))
template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
score = np.zeros((len(p1),))
total_pairs = np.array(range(len(p1)))
batchsize = 100000
sublists = [
total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
]
total_sublists = len(sublists)
for c, s in enumerate(sublists):
feat1 = template_norm_feats[template2id[p1[s]]]
feat2 = template_norm_feats[template2id[p2[s]]]
similarity_score = np.sum(feat1 * feat2, -1)
score[s] = similarity_score.flatten()
if c % 10 == 0:
print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score
template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
score = np.zeros((len(p1),))
total_pairs = np.array(range(len(p1)))
batchsize = 100000
sublists = [
total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
]
total_sublists = len(sublists)
for c, s in enumerate(sublists):
feat1 = template_norm_feats[template2id[p1[s]]]
feat2 = template_norm_feats[template2id[p2[s]]]
similarity_score = np.sum(feat1 * feat2, -1)
score[s] = similarity_score.flatten()
if c % 10 == 0:
print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
print('Time: %.2f s. ' % (stop - start))
print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],
img_feats.shape[1]))
print('Time: %.2f s. ' % (stop - start))
score = verification(template_norm_feats, unique_templates, p1, p2)
print('Time: %.2f s. ' % (stop - start))
np.save(score_save_file, score)
print(tpr_fpr_table)
def verification2(template_norm_feats=None,
                  unique_templates=None,
                  p1=None,
                  p2=None):
    """Compute set-to-set cosine similarity for template pairs.

    NOTE(review): this duplicates ``verification`` (same lookup-table plus
    batched dot-product algorithm); consider keeping only one of the two.

    Args:
        template_norm_feats: (num_templates, feat_dim) L2-normalized features.
        unique_templates: template ids aligned with template_norm_feats rows.
        p1, p2: integer arrays of template ids forming the pairs to score.

    Returns:
        (len(p1),) array of cosine similarities, one per pair.
    """
    # Dense map from template id to its row in template_norm_feats.
    template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
    for count_template, uqt in enumerate(unique_templates):
        template2id[uqt] = count_template
    score = np.zeros((len(p1),))  # save cosine distance between pairs
    total_pairs = np.array(range(len(p1)))
    batchsize = 100000  # small batchsize instead of all pairs in one batch due to the memory limitation
    sublists = [
        total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
    ]
    total_sublists = len(sublists)
    for c, s in enumerate(sublists):
        feat1 = template_norm_feats[template2id[p1[s]]]
        feat2 = template_norm_feats[template2id[p2[s]]]
        # Features are unit-length, so the dot product is cosine similarity.
        similarity_score = np.sum(feat1 * feat2, -1)
        score[s] = similarity_score.flatten()
        if c % 10 == 0:
            print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score
3,340 | import os
import pickle
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import timeit
import sklearn
import argparse
import cv2
import numpy as np
import torch
from skimage import transform as trans
from backbones import get_model
from sklearn.metrics import roc_curve, auc
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from pathlib import Path
import sys
import warnings
with open(path, 'rb') as fid:
img_feats = pickle.load(fid)
    return img_feats
img_feats = np.empty((len(files), 1024), dtype=np.float32)
return img_feats, faceness_score
with open(path, 'rb') as fid:
img_feats = pickle.load(fid)
    return img_feats
img_feats, faceness_scores = get_image_feature(img_path, files_list,
model_path, 0, gpu_id)
def read_score(path):
    """Load and return a pickled score/feature object from *path*."""
    handle = open(path, 'rb')
    try:
        return pickle.load(handle)
    finally:
        handle.close()
3,341 | import numpy as np
import onnx
import torch
def convert_onnx(net, path_module, output, opset=11, simplify=False):
    """Export a PyTorch recognition backbone to an ONNX file.

    Args:
        net: torch.nn.Module to export; *path_module*'s weights are loaded
            into it before tracing.
        path_module: path of the saved state_dict checkpoint.
        output: destination ``.onnx`` file path.
        opset: ONNX opset version to target.
        simplify: if True, run onnx-simplifier on the exported graph.
    """
    assert isinstance(net, torch.nn.Module)
    # Trace with a dummy 112x112 RGB image normalized torch-style.
    img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # use an explicit dtype (the tensor is cast to float32 below anyway).
    img = img.astype(np.float32)
    img = (img / 255. - 0.5) / 0.5  # torch style norm
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = torch.from_numpy(img).unsqueeze(0).float()

    weight = torch.load(path_module)
    net.load_state_dict(weight)
    net.eval()
    torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
    # Rewrite the batch dimension to be dynamic so any batch size is accepted.
    model = onnx.load(output)
    graph = model.graph
    graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
    if simplify:
        # NOTE: this import shadows the *simplify* parameter, preserved from
        # the original code; it is safe because the flag was already consumed.
        from onnxsim import simplify
        model, check = simplify(model)
        assert check, "Simplified ONNX model could not be validated"
    onnx.save(model, output)
3,342 | import logging
import os
import sys
def init_logging(rank, models_root):
    """Attach file and stdout handlers to the root logger on rank 0 only.

    Args:
        rank: distributed rank of this process; non-zero ranks do nothing.
        models_root: directory where ``training.log`` is created.
    """
    if rank != 0:
        return
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    fmt = logging.Formatter("Training: %(asctime)s-%(message)s")
    # File handler first, then stdout — same order as before.
    for handler in (
            logging.FileHandler(os.path.join(models_root, "training.log")),
            logging.StreamHandler(sys.stdout),
    ):
        handler.setFormatter(fmt)
        root.addHandler(handler)
    root.info('rank_id: %d' % rank)
3,343 | import os
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from sklearn.metrics import roc_curve, auc
def read_template_pair_list(path):
    """Read an IJB template-pair protocol file.

    Each row of the space-separated file is ``template_id1 template_id2 label``.

    Args:
        path: path to the pair-list text file.

    Returns:
        Tuple ``(t1, t2, label)`` of integer arrays: the two template ids of
        each pair and the ground-truth same/different label.
    """
    pairs = pd.read_csv(path, sep=' ', header=None).values
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement and yields the same dtype.
    t1 = pairs[:, 0].astype(int)
    t2 = pairs[:, 1].astype(int)
    label = pairs[:, 2].astype(int)
    return t1, t2, label
3,344 | import importlib
import os.path as osp
def get_config(config_file):
    """Load a training config by overlaying a job config on ``configs.base``.

    Args:
        config_file: path like ``configs/<name>.py``; only the basename is
            used to pick the module imported from the ``configs`` package.

    Returns:
        The merged config object (``configs.base.config`` updated in place
        with the job module's ``config``).  NOTE(review): attribute access
        (``cfg.output``) suggests an EasyDict-like mapping — confirm in
        configs/base.py.
    """
    assert config_file.startswith('configs/'), 'config file setting must start with configs/'
    temp_config_name = osp.basename(config_file)
    temp_module_name = osp.splitext(temp_config_name)[0]
    # Start from the shared defaults, then overlay job-specific values.
    config = importlib.import_module("configs.base")
    cfg = config.config
    config = importlib.import_module("configs.%s" % temp_module_name)
    job_cfg = config.config
    cfg.update(job_cfg)
    # Default the output directory to work_dirs/<config name>.
    if cfg.output is None:
        cfg.output = osp.join('work_dirs', temp_module_name)
    return cfg
3,345 | import datetime
import os
import pickle
import mxnet as mx
import numpy as np
import sklearn
import torch
from mxnet import ndarray as nd
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
def calculate_roc(thresholds,
                  embeddings1,
                  embeddings2,
                  actual_issame,
                  nrof_folds=10,
                  pca=0):
    """Cross-validated ROC computation over squared-L2 pair distances.

    For each fold: pick the threshold maximizing train accuracy, then record
    TPR/FPR at every threshold and accuracy at the best one on the test split.

    Args:
        thresholds: 1-D array of distance thresholds to sweep.
        embeddings1, embeddings2: (num_pairs, dim) arrays, one row per pair.
        actual_issame: boolean same/different label per pair.
        nrof_folds: number of K-fold splits (uses LFold, defined elsewhere).
        pca: if > 0, fit a PCA of that dimensionality per fold before scoring.

    Returns:
        (tpr, fpr, accuracy): mean TPR/FPR across folds per threshold, and
        the per-fold accuracies at each fold's best threshold.
    """
    assert (embeddings1.shape[0] == embeddings2.shape[0])
    assert (embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = LFold(n_splits=nrof_folds, shuffle=False)
    tprs = np.zeros((nrof_folds, nrof_thresholds))
    fprs = np.zeros((nrof_folds, nrof_thresholds))
    accuracy = np.zeros((nrof_folds))
    indices = np.arange(nrof_pairs)
    # Without PCA the distances are fold-independent, so compute them once.
    if pca == 0:
        diff = np.subtract(embeddings1, embeddings2)
        dist = np.sum(np.square(diff), 1)
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        if pca > 0:
            # PCA is fit on the training pairs only, then applied to all
            # embeddings; features are re-normalized afterwards.
            print('doing pca on', fold_idx)
            embed1_train = embeddings1[train_set]
            embed2_train = embeddings2[train_set]
            _embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
            pca_model = PCA(n_components=pca)
            pca_model.fit(_embed_train)
            embed1 = pca_model.transform(embeddings1)
            embed2 = pca_model.transform(embeddings2)
            embed1 = sklearn.preprocessing.normalize(embed1)
            embed2 = sklearn.preprocessing.normalize(embed2)
            diff = np.subtract(embed1, embed2)
            dist = np.sum(np.square(diff), 1)
        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(
                threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
                threshold, dist[test_set],
                actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(
            thresholds[best_threshold_index], dist[test_set],
            actual_issame[test_set])
    tpr = np.mean(tprs, 0)
    fpr = np.mean(fprs, 0)
    return tpr, fpr, accuracy
def calculate_val(thresholds,
                  embeddings1,
                  embeddings2,
                  actual_issame,
                  far_target,
                  nrof_folds=10):
    """Cross-validated validation rate (TAR) at a target false-accept rate.

    Per fold: interpolate the threshold on the training split that yields
    ``far_target``, then measure VAL/FAR on the test split at that threshold.

    Args:
        thresholds: 1-D array of candidate distance thresholds.
        embeddings1, embeddings2: (num_pairs, dim) arrays, one row per pair.
        actual_issame: boolean same/different label per pair.
        far_target: desired false-accept rate (e.g. 1e-3).
        nrof_folds: number of K-fold splits (uses LFold, defined elsewhere).

    Returns:
        (val_mean, val_std, far_mean) across folds.
    """
    assert (embeddings1.shape[0] == embeddings2.shape[0])
    assert (embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = LFold(n_splits=nrof_folds, shuffle=False)
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff), 1)
    indices = np.arange(nrof_pairs)
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(
                threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train) >= far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            # Target FAR is unreachable on this fold; fall back to 0.
            threshold = 0.0
        val[fold_idx], far[fold_idx] = calculate_val_far(
            threshold, dist[test_set], actual_issame[test_set])
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
    """Run the LFW-style verification protocol on interleaved pair embeddings.

    Args:
        embeddings: (2*num_pairs, dim) array; even rows are the first image
            of each pair, odd rows the second.
        actual_issame: boolean same/different label per pair.
        nrof_folds: number of cross-validation folds.
        pca: if > 0, fit a per-fold PCA of that dimensionality first.

    Returns:
        (tpr, fpr, accuracy, val, val_std, far): mean ROC curve, per-fold
        accuracies, and the validation rate at FAR = 1e-3.
    """
    # Calculate evaluation metrics
    thresholds = np.arange(0, 4, 0.01)
    embeddings1 = embeddings[0::2]
    embeddings2 = embeddings[1::2]
    tpr, fpr, accuracy = calculate_roc(thresholds,
                                       embeddings1,
                                       embeddings2,
                                       np.asarray(actual_issame),
                                       nrof_folds=nrof_folds,
                                       pca=pca)
    # Finer threshold grid for the FAR-targeted sweep.
    thresholds = np.arange(0, 4, 0.001)
    val, val_std, far = calculate_val(thresholds,
                                      embeddings1,
                                      embeddings2,
                                      np.asarray(actual_issame),
                                      1e-3,
                                      nrof_folds=nrof_folds)
    return tpr, fpr, accuracy, val, val_std, far
3,346 | import datetime
import os
import pickle
import mxnet as mx
import numpy as np
import sklearn
import torch
from mxnet import ndarray as nd
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
def load_bin(path, image_size):
    """Load a verification ``.bin`` dataset into torch tensors.

    The pickle holds (bins, issame_list): encoded image bytes and per-pair
    same/different labels.  Two tensor copies are produced, the original
    images and their horizontal flips (used for flip-augmented evaluation).

    Args:
        path: path to the pickled .bin file.
        image_size: (height, width) target size; images are resized via
            mxnet's resize_short when needed.

    Returns:
        (data_list, issame_list) where data_list is [original, flipped],
        each of shape (2 * num_pairs, 3, H, W).
    """
    try:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f)  # py2
    except UnicodeDecodeError as e:
        # Pickles written by Python 2 need latin/bytes decoding under py3.
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')  # py3
    data_list = []
    for flip in [0, 1]:
        data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
        data_list.append(data)
    for idx in range(len(issame_list) * 2):
        _bin = bins[idx]
        img = mx.image.imdecode(_bin)
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        img = nd.transpose(img, axes=(2, 0, 1))  # HWC -> CHW
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)  # horizontal flip
            data_list[flip][idx][:] = torch.from_numpy(img.asnumpy())
        if idx % 1000 == 0:
            print('loading bin', idx)
    print(data_list[0].shape)
    return data_list, issame_list
3,347 | import datetime
import os
import pickle
import mxnet as mx
import numpy as np
import sklearn
import torch
from mxnet import ndarray as nd
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
def dumpR(data_set,
          backbone,
          batch_size,
          name='',
          data_extra=None,
          label_shape=None):
    """Dump verification embeddings (original + flipped, summed) to temp.bin.

    NOTE(review): this looks like stale mxnet-era code — ``_label``,
    ``model`` and ``_data_extra`` are not defined anywhere in this function
    and would raise NameError at runtime; the ``backbone``, ``name`` and
    ``label_shape`` parameters are unused.  Confirm before relying on it.

    Args:
        data_set: (data_list, issame_list) as returned by load_bin.
        backbone: unused here (see note above).
        batch_size: forward-pass batch size.
        name: unused here.
        data_extra: optional extra input tensor appended to each batch.
        label_shape: unused here.
    """
    print('dump verification embedding..')
    data_list = data_set[0]
    issame_list = data_set[1]
    embeddings_list = []
    time_consumed = 0.0
    for i in range(len(data_list)):
        data = data_list[i]
        embeddings = None
        ba = 0
        while ba < data.shape[0]:
            bb = min(ba + batch_size, data.shape[0])
            count = bb - ba
            # Slice a full batch ending at bb; the overlap with the previous
            # batch is discarded below via (batch_size - count).
            _data = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb)
            time0 = datetime.datetime.now()
            if data_extra is None:
                db = mx.io.DataBatch(data=(_data,), label=(_label,))
            else:
                db = mx.io.DataBatch(data=(_data, _data_extra),
                                     label=(_label,))
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            _embeddings = net_out[0].asnumpy()
            time_now = datetime.datetime.now()
            diff = time_now - time0
            time_consumed += diff.total_seconds()
            if embeddings is None:
                embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
            embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
            ba = bb
        embeddings_list.append(embeddings)
    # Sum original and flipped embeddings, then L2-normalize.
    embeddings = embeddings_list[0] + embeddings_list[1]
    embeddings = sklearn.preprocessing.normalize(embeddings)
    actual_issame = np.asarray(issame_list)
    outname = os.path.join('temp.bin')
    with open(outname, 'wb') as f:
        pickle.dump((embeddings, issame_list),
                    f,
                    protocol=pickle.HIGHEST_PROTOCOL)
3,348 | import numpy as np
import torch
import torch.nn.functional as F
from scipy.io import loadmat
from src.face3d.util.load_mats import transferBFM09
import os
def perspective_projection(focal, center):
    """Build the transposed 3x3 pinhole intrinsics matrix.

    Args:
        focal: focal length in pixels (shared by x and y).
        center: principal-point coordinate (shared by x and y).

    Returns:
        float32 (3, 3) array equal to K.T, so points project as p @ K.T.
    """
    intrinsics = np.array([
        [focal, 0, center],
        [0, focal, center],
        [0, 0, 1],
    ], dtype=np.float32)
    return intrinsics.T
3,349 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
def resize_n_crop(image, M, dsize=112):
    """Affine warp-crop a batch of images to a dsize x dsize square.

    Args:
        image: (b, c, h, w) image batch.
        M: (b, 2, 3) affine matrices mapping source pixels to the crop.
        dsize: output side length in pixels.

    Returns:
        (b, c, dsize, dsize) tensor produced by kornia's warp_affine.
    """
    # image: (b, c, h, w)
    # M   :  (b, 2, 3)
    return warp_affine(image, M, dsize=(dsize, dsize), align_corners=True)
3,350 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
def filter_state_dict(state_dict, remove_name='fc'):
    """Copy *state_dict*, dropping entries whose key contains *remove_name*.

    Args:
        state_dict: mapping of parameter names to tensors/values.
        remove_name: substring; any key containing it is excluded.

    Returns:
        A new dict with the filtered entries, original order preserved.
    """
    return {
        key: value
        for key, value in state_dict.items()
        if remove_name not in key
    }
3,351 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
The provided code snippet includes necessary dependencies for implementing the `get_scheduler` function. Write a Python function `def get_scheduler(optimizer, opt)` to solve the following problem:
Return a learning rate scheduler Parameters: optimizer -- the optimizer of the network opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. See https://pytorch.org/docs/stable/optim.html for more details.
Here is the function:
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError: if opt.lr_policy is not a supported policy name.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Multiplier stays 1.0 until opt.n_epochs, then decays linearly;
            # max(0, ...) clamps it so the rate never exceeds the base lr.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_epochs, gamma=0.2)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # Bug fix: the original *returned* the NotImplementedError instance
        # (also passing the policy as a stray second argument), so callers
        # silently received an exception object instead of failing fast.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
3,352 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class ReconNetWrapper(nn.Module):
def __init__(self, net_recon, use_last_fc=False, init_path=None):
def forward(self, x):
def define_net_recon(net_recon, use_last_fc=False, init_path=None):
return ReconNetWrapper(net_recon, use_last_fc=use_last_fc, init_path=init_path) | null |
3,353 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class RecogNetWrapper(nn.Module):
    """Frozen face-recognition backbone producing L2-normalized embeddings.

    Wraps an arcface backbone obtained from ``get_model``; every parameter
    is frozen so the wrapper can serve as a fixed identity-feature extractor
    (e.g. for an identity-preservation loss).
    """
    def __init__(self, net_recog, pretrained_path=None, input_size=112):
        # net_recog: backbone name understood by get_model (e.g. 'r50').
        # pretrained_path: optional state_dict checkpoint, loaded on CPU.
        # input_size: side length the aligned crops are warped to.
        super(RecogNetWrapper, self).__init__()
        net = get_model(name=net_recog, fp16=False)
        if pretrained_path:
            state_dict = torch.load(pretrained_path, map_location='cpu')
            net.load_state_dict(state_dict)
            print("loading pretrained net_recog %s from %s" %(net_recog, pretrained_path))
        # Freeze the backbone: it is used only for feature extraction.
        for param in net.parameters():
            param.requires_grad = False
        self.net = net
        # Map [0, 1] images to the [-1, 1] range the backbone expects.
        self.preprocess = lambda x: 2 * x - 1
        self.input_size=input_size
    def forward(self, image, M):
        # Align/crop with the affine matrices M, then embed and L2-normalize.
        image = self.preprocess(resize_n_crop(image, M, self.input_size))
        id_feature = F.normalize(self.net(image), dim=-1, p=2)
        return id_feature
def define_net_recog(net_recog, pretrained_path=None):
    """Construct a frozen recognition-net wrapper and put it in eval mode.

    Args:
        net_recog: backbone name forwarded to RecogNetWrapper.
        pretrained_path: optional checkpoint path forwarded to the wrapper.

    Returns:
        The RecogNetWrapper instance, already switched to eval().
    """
    wrapper = RecogNetWrapper(net_recog=net_recog, pretrained_path=pretrained_path)
    wrapper.eval()
    return wrapper
3,354 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding.

    Bias-free; padding equals the dilation so spatial size is preserved at
    stride 1 for any dilation.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
3,355 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes: int, out_planes: int, stride: int = 1, bias: bool = False) -> nn.Conv2d` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes: int, out_planes: int, stride: int = 1, bias: bool = False) -> nn.Conv2d:
    """1x1 (pointwise) convolution; bias off by default."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=bias,
    )
    return conv
3,356 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class BasicBlock(nn.Module):
    """Standard two-conv residual block used by ResNet-18/34 (expansion 1)."""
    expansion: int = 1
    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the input so its shape matches the residual branch.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone.

    Difference from stock torchvision: the final fully-connected layer is
    optional (``use_last_fc``); when disabled, ``forward`` returns the pooled
    (N, 512*expansion, 1, 1) feature map without flattening.
    """
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        use_last_fc: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.use_last_fc = use_last_fc
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (4x downsample total).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        if self.use_last_fc:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        # Stage builder: the first block may downsample/expand channels (via
        # the 1x1 `downsample` projection); subsequent blocks keep the shape.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Dilated mode: trade stride for dilation to keep resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        if self.use_last_fc:
            x = torch.flatten(x, 1)
            x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Build a ResNet and optionally load ImageNet weights for *arch*.

    NOTE(review): ``load_state_dict_from_url`` and ``model_urls`` are not
    defined in this snippet — the pretrained branch raises NameError unless
    they are provided elsewhere in the module; confirm before enabling it.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnet18` function. Write a Python function `def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Two BasicBlocks per stage.  NOTE(review): pretrained=True relies on
    # model_urls / load_state_dict_from_url being defined elsewhere (see _resnet).
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
3,357 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `resnet34` function. Write a Python function `def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs) | r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,358 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs) | r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,359 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `resnet101` function. Write a Python function `def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs) | r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,360 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `resnet152` function. Write a Python function `def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs) | r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,361 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `resnext50_32x4d` function. Write a Python function `def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs) | r"""ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,362 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `resnext101_32x8d` function. Write a Python function `def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) | r"""ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,363 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `wide_resnet50_2` function. Write a Python function `def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs) | r"""Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,364 | import os
import numpy as np
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from .arcface_torch.backbones import get_model
from kornia.geometry import warp_affine
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
use_last_fc: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.use_last_fc = use_last_fc
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if self.use_last_fc:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.use_last_fc:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `wide_resnet101_2` function. Write a Python function `def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet` to solve the following problem:
r"""Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) | r"""Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
3,365 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.') | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.