repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/monsterPerson.py | __author__ = 'Erilyth'
import pygame
import os
from .person import Person
'''
This class defines all the Monsters present in our game.
Each Monster can only move on the top floor and cannot move vertically.
'''
class MonsterPerson(Person):
    '''
    A monster (Donkey Kong). Monsters walk back and forth on the top floor
    only and cannot move vertically; while stopped they launch fireballs.
    '''

    def __init__(self, raw_image, position, rng, dir, width=15, height=15):
        super(MonsterPerson, self).__init__(raw_image, position, width, height)
        self.__speed = 2
        self.rng = rng
        # Random initial facing: 0 = right, 1 = left.
        self.__direction = int(self.rng.rand() * 100) % 2
        self.__cycles = 0  # walk-animation tick counter
        self.__stopDuration = 0  # >0 while launching a fireball
        # Walk frames: "monsterN" face right, "monsterN1" face left;
        # "monsterstill*" are the fireball-launching poses.
        frame_names = [
            "monster0", "monster1", "monster2", "monster3",
            "monster01", "monster11", "monster21", "monster31",
            "monsterstill0", "monsterstill10",
            "monsterstill1", "monsterstill11",
        ]
        self.IMAGES = {
            name: pygame.image.load(
                os.path.join(dir, 'assets/%s.png' % name)).convert_alpha()
            for name in frame_names
        }

    # Getters and Setters
    def getSpeed(self):
        return self.__speed

    def setSpeed(self, speed=None):
        '''Set the walking speed and return it.

        BUG FIX: this was a copy-paste of getSpeed() and never assigned
        anything. Called with no argument it still just returns the current
        speed, so existing call sites keep working.
        '''
        if speed is not None:
            self.__speed = speed
        return self.__speed

    def getStopDuration(self):
        return self.__stopDuration

    def setStopDuration(self, stopDuration):
        self.__stopDuration = stopDuration

    def checkWall(self, colliderGroup):
        '''Probe one step ahead in the walking direction and return the
        walls that would be hit there (the probe move is always undone).'''
        probe = 20 if self.__direction == 0 else -20
        self.updateWH(self.image, "H", probe, 40, 40)
        Colliders = pygame.sprite.spritecollide(self, colliderGroup, False)
        self.updateWH(self.image, "H", -probe, 40, 40)
        return Colliders

    def continuousUpdate(self, GroupList, GroupList2):
        '''Per-frame update: walk animation, wall bouncing and the stopped
        (fireball-launching) poses.'''
        if self.__stopDuration == 0:
            # Walking: each of the 4 frames shows for 6 ticks of a
            # 24-tick cycle.
            self.__cycles += 1
            frame = (self.__cycles % 24) // 6  # 0..3
            if self.__direction == 0:  # moving right
                name = "monster%d" % frame
                step = self.__speed
            else:  # moving left
                name = "monster%d1" % frame
                step = -self.__speed
            self.updateWH(self.IMAGES[name], "H", step, 45, 45)
            if self.checkWall(GroupList):
                # Hit a wall: turn around, restart the animation and undo
                # this tick's movement.
                self.__direction = 1 - self.__direction
                self.__cycles = 0
                self.updateWH(self.image, "H", -step, 45, 45)
        else:
            # Donkey Kong is currently not moving, which means he is
            # launching a fireball.
            self.__stopDuration -= 1
            if self.__stopDuration == 0:
                # Launch finished: drop back down to walking height.
                self.updateWH(self.image, "V", 12, 50, 50)
            # Pick the still pose for the current phase of the launch;
            # the tuple is indexed by facing direction (0=right, 1=left).
            if self.__stopDuration >= 10 or self.__stopDuration < 5:
                poses = ("monsterstill0", "monsterstill10")
            else:
                poses = ("monsterstill1", "monsterstill11")
            self.updateWH(self.IMAGES[poses[self.__direction]], "H", 0, 45, 45)
| 6,131 | 43.434783 | 126 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/onBoard.py | __author__ = 'Batchu Vishal'
import pygame
class OnBoard(pygame.sprite.Sprite):
    '''
    Base class for every inanimate object shown on the board
    (e.g. coins, ladders, walls).

    Scales the supplied image to the 15x15 tile size and positions its
    rect so pygame's sprite draw machinery can render it.
    '''

    def __init__(self, raw_image, position):
        pygame.sprite.Sprite.__init__(self)
        self.__position = position
        # Image and Rect are required by the draw function on sprites.
        self.image = pygame.transform.scale(raw_image, (15, 15))
        self.rect = self.image.get_rect()
        self.rect.center = self.__position

    # Getters and Setters
    def setCenter(self, position):
        self.rect.center = position

    def getPosition(self):
        return self.__position

    def setPosition(self, position):
        self.__position = position

    def updateImage(self, raw_image):
        '''Abstract: subclasses decide how (and at what size) to swap
        their image.'''
        raise NotImplementedError("Subclass must implement this")

    def modifySize(self, raw_image, height, width):
        '''Replace the sprite image, rescaled to (width, height).'''
        self.image = pygame.transform.scale(raw_image, (width, height))
| 1,433 | 34.85 | 113 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/person.py | __author__ = 'Batchu Vishal'
import pygame
'''
This class defines all living things in the game, ex.Donkey Kong, Player etc
Each of these objects can move in any direction specified.
'''
class Person(pygame.sprite.Sprite):
    '''
    Base class for every living thing in the game (e.g. Donkey Kong, the
    player). Each of these objects can be moved along either axis.
    '''

    def __init__(self, raw_image, position, width, height):
        super(Person, self).__init__()
        self.width = width
        self.height = height
        self.__position = position
        self.image = pygame.transform.scale(
            raw_image, (width, height)).convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = self.__position

    # Abstract speed accessors: this class has no speed variable, but every
    # child class must define a movement speed and expose it through these.
    def getSpeed(self):  # Abstract method
        raise NotImplementedError("Subclass must implement this")

    def setSpeed(self):  # Abstract method
        raise NotImplementedError("Subclass must implement this")

    # Getters and Setters
    def setCenter(self, position):
        self.rect.center = position

    def getPosition(self):
        return self.__position

    def setPosition(self, position):
        self.__position = position

    def updateWH(self, raw_image, direction, value, width, height):
        '''Move along the horizontal ("H") or vertical ("V") axis and swap
        the sprite image.

        Note: the image is deliberately NOT rescaled here; width/height are
        kept only for interface compatibility with callers.
        '''
        x, y = self.__position
        if direction == "H":
            x += value
        if direction == "V":
            y += value
        self.__position = (x, y)
        self.image = raw_image
        self.rect.center = self.__position

    def updateY(self, value):
        '''Convenience for vertical-only movement.'''
        x, y = self.__position
        self.__position = (x, y + value)
        self.rect.center = self.__position

    def checkCollision(self, colliderGroup):
        '''Return all sprites in colliderGroup colliding with this person.'''
        return pygame.sprite.spritecollide(self, colliderGroup, False)

    def continuousUpdate(self, GroupList, GroupList2):
        '''Abstract: per-frame movement/collision update; must be
        implemented by child classes.'''
        raise NotImplementedError("Subclass must implement this")
| 2,660 | 35.958333 | 126 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/coin.py | __author__ = 'Batchu Vishal'
import pygame
import os
from .onBoard import OnBoard
class Coin(OnBoard):
    """
    A collectible coin; picking one up increases the score by an amount
    of 'value'.

    The coin is animated by cycling through 5 images. It inherits from
    OnBoard since it is an inanimate object placed on the board.
    """

    def __init__(self, raw_image, position, _dir):
        OnBoard.__init__(self, raw_image, position)
        self.__coinAnimState = 0  # animation counter, cycles 0..24
        self.IMAGES = {
            "coin%d" % i: pygame.transform.scale(
                pygame.image.load(
                    os.path.join(_dir, 'assets/coin%d.png' % i)),
                (15, 15)).convert_alpha()
            for i in range(1, 6)
        }

    def updateImage(self, raw_image):
        '''Swap the currently displayed coin image.'''
        self.image = raw_image

    def animateCoin(self):
        '''Advance the animation: each of the 5 frames shows for 5 ticks.

        BUG FIX: the frame index must use integer division. Under Python 3
        "/" is true division, so states 1-4 of each 5-tick window produced
        fractional values that matched no frame and the animation stalled.
        '''
        self.__coinAnimState = (self.__coinAnimState + 1) % 25
        frame = self.__coinAnimState // 5  # 0..4
        self.updateImage(self.IMAGES["coin%d" % (frame + 1)])
| 1,900 | 43.209302 | 129 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/player.py | __author__ = 'Batchu Vishal'
from .person import Person
'''
This class defines our player.
It inherits from the Person class since a Player is also a person.
We specialize the person by adding capabilities such as jump etc..
'''
class Player(Person):
    '''
    The player character.

    Inherits from Person since a Player is also a person; specializes it
    with capabilities such as jumping.
    '''

    def __init__(self, raw_image, position, width, height):
        super(Player, self).__init__(raw_image, position, width, height)
        self.isJumping = 0
        self.onLadder = 0
        self.currentJumpSpeed = 0
        self.__gravity = 0.85  # Gravity affecting the jump velocity
        self.__speed = 5  # Movement speed of the player

    # Getters and Setters
    def getSpeed(self):
        return self.__speed

    def setSpeed(self, speed=None):
        '''Set the movement speed and return it.

        BUG FIX: this was a copy-paste of getSpeed() and never assigned
        anything. Called with no argument it still just returns the current
        speed, so existing call sites keep working.
        '''
        if speed is not None:
            self.__speed = speed
        return self.__speed

    def continuousUpdate(self, wallGroupList, ladderGroupList):
        '''Per-frame update: manages falling and the player's jump arc.
        Only the player can jump.'''
        # Only gets run when the player is not on a ladder.
        if self.onLadder == 0:
            wallsCollided = self.checkCollision(wallGroupList)
            if self.isJumping == 0:
                # Probe 2px below: if nothing supports the player, start a
                # zero-speed jump so he simply falls.
                self.updateY(2)
                laddersCollided = self.checkCollision(ladderGroupList)
                wallsCollided = self.checkCollision(wallGroupList)
                self.updateY(-2)
                if len(wallsCollided) == 0 and len(laddersCollided) == 0:
                    self.isJumping = 1
                    self.currentJumpSpeed = 0

            if self.isJumping:
                if wallsCollided:
                    if wallsCollided[0].getPosition()[1] > self.getPosition()[1]:
                        # Wall below us: hit the floor, stop the jump and
                        # snap on top of the wall (+1 to avoid overlap).
                        self.isJumping = 0
                        self.setPosition((
                            self.getPosition()[0],
                            wallsCollided[0].getPosition()[1] - (self.height + 1)))
                    elif wallsCollided[0].getPosition()[1] < self.getPosition()[1]:
                        # Wall above us: hit the ceiling, kill upward speed
                        # so he falls, and snap just below the wall.
                        self.currentJumpSpeed = 0
                        self.setPosition((
                            self.getPosition()[0],
                            wallsCollided[0].getPosition()[1] + (self.height + 1)))
                    self.setCenter(self.getPosition())
                # Still airborne (has not touched the floor yet):
                if self.isJumping:
                    # Move by the current vertical speed, then apply
                    # gravity with a terminal fall speed of 8.
                    self.updateY(-self.currentJumpSpeed)
                    self.setCenter(self.getPosition())
                    self.currentJumpSpeed -= self.__gravity
                    if self.currentJumpSpeed < -8:
                        self.currentJumpSpeed = -8
| 3,438 | 44.25 | 97 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/wall.py | __author__ = 'Batchu Vishal'
from onBoard import OnBoard
import pygame
'''
This class defines all our walls in the game.
Currently not much is done here, but we can add traps to certain walls such as spiked walls etc to damage the player
'''
class Wall(OnBoard):
    '''
    A wall tile on the board. Currently plain, but variants (e.g. spiked
    walls that damage the player) could be added here later.
    '''

    def __init__(self, raw_image, position):
        super(Wall, self).__init__(raw_image, position)

    def updateImage(self, raw_image):
        '''Swap the wall image, rescaled to the 15x15 tile size.'''
        self.image = pygame.transform.scale(raw_image, (15, 15))
| 534 | 25.75 | 116 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/__init__.py | __author__ = 'Batchu Vishal'
import pygame
import sys
from pygame.constants import K_a, K_d, K_SPACE, K_w, K_s, QUIT, KEYDOWN
from .board import Board
#from ..base import base
#from ple.games import base
from ple.games.base.pygamewrapper import PyGameWrapper
import numpy as np
import os
class MonsterKong(PyGameWrapper):
    '''A Donkey Kong-style game exposed through the PLE PyGameWrapper API.

    The level/game logic lives in the Board class; this wrapper wires
    keyboard input, rewards and the per-frame update together.
    '''

    def __init__(self):
        """
        Parameters
        ----------
        None
        """
        self.height = 465
        self.width = 500

        actions = {
            "left": K_a,
            "right": K_d,
            "jump": K_SPACE,
            "up": K_w,
            "down": K_s
        }

        PyGameWrapper.__init__(
            self, self.width, self.height, actions=actions)

        self.rewards = {
            "positive": 5,
            "win": 50,
            "negative": -25,
            "tick": 0
        }

        self.allowed_fps = 30
        self._dir = os.path.dirname(os.path.abspath(__file__))

        # Player sprites: two walking frames per direction, plus standing.
        self.IMAGES = {
            "right": pygame.image.load(os.path.join(self._dir, 'assets/right.png')),
            "right2": pygame.image.load(os.path.join(self._dir, 'assets/right2.png')),
            "left": pygame.image.load(os.path.join(self._dir, 'assets/left.png')),
            "left2": pygame.image.load(os.path.join(self._dir, 'assets/left2.png')),
            "still": pygame.image.load(os.path.join(self._dir, 'assets/still.png'))
        }

    def init(self):
        # Fresh Board instance => fresh level state.
        self.newGame = Board(
            self.width,
            self.height,
            self.rewards,
            self.rng,
            self._dir)

        # Timer that staggers fireball creation between the monsters.
        self.fireballTimer = 0

        # Keep handles to the sprite groups owned by the Board.
        self.playerGroup = self.newGame.playerGroup
        self.wallGroup = self.newGame.wallGroup
        self.ladderGroup = self.newGame.ladderGroup

    def getScore(self):
        return self.newGame.score

    def game_over(self):
        # The episode ends once the player runs out of lives.
        return self.newGame.lives <= 0

    def step(self, dt):
        '''Run one frame of the game: spawn fireballs, handle input,
        resolve collisions and redraw.'''
        self.newGame.score += self.rewards["tick"]

        # Grab the groups that change between frames.
        self.fireballGroup = self.newGame.fireballGroup
        self.coinGroup = self.newGame.coinGroup

        # Spawn fireballs on a staggered 70-tick schedule, one slot per
        # monster currently in the game.
        if self.fireballTimer == 0:
            self.newGame.CreateFireball(
                self.newGame.Enemies[0].getPosition(), 0)
        elif len(self.newGame.Enemies) >= 2 and self.fireballTimer == 23:
            self.newGame.CreateFireball(
                self.newGame.Enemies[1].getPosition(), 1)
        elif len(self.newGame.Enemies) >= 3 and self.fireballTimer == 46:
            self.newGame.CreateFireball(
                self.newGame.Enemies[2].getPosition(), 2)
        self.fireballTimer = (self.fireballTimer + 1) % 70

        # Animate the coins.
        for coin in self.coinGroup:
            coin.animateCoin()

        player = self.newGame.Players[0]

        # Probe 2px below the player for support (walls/ladders), then
        # restore his original position.
        player.updateY(2)
        self.laddersCollidedBelow = player.checkCollision(self.ladderGroup)
        self.wallsCollidedBelow = player.checkCollision(self.wallGroup)
        player.updateY(-2)

        # Probe 2px above for a ceiling, then restore.
        player.updateY(-2)
        self.wallsCollidedAbove = player.checkCollision(self.wallGroup)
        player.updateY(2)

        # Sets the onLadder state of the player.
        self.newGame.ladderCheck(
            self.laddersCollidedBelow,
            self.wallsCollidedBelow,
            self.wallsCollidedAbove)

        for event in pygame.event.get():
            # Exit to desktop.
            if event.type == QUIT:
                pygame.quit()
                sys.exit()

            if event.type == KEYDOWN:
                # Ladders currently overlapping the player.
                self.laddersCollidedExact = player.checkCollision(
                    self.ladderGroup)

                jump_pressed = (event.key == self.actions["jump"] and
                                player.onLadder == 0)
                climb_pressed = (event.key == self.actions["up"] and
                                 self.laddersCollidedExact)
                if jump_pressed or climb_pressed:
                    # Set the player to move up.
                    self.direction = 2
                    if player.isJumping == 0 and self.wallsCollidedBelow:
                        # Standing on something: launch the jump.
                        player.isJumping = 1
                        player.currentJumpSpeed = 7

                if event.key == self.actions["right"]:
                    if self.newGame.direction != 4:
                        self.newGame.direction = 4
                        self.newGame.cycles = -1  # Reset cycles
                    # Alternate the two walk frames, 2 cycles each.
                    self.newGame.cycles = (self.newGame.cycles + 1) % 4
                    if self.newGame.cycles < 2:
                        frame = self.IMAGES["right"]
                    else:
                        frame = self.IMAGES["right2"]
                    player.updateWH(frame, "H", player.getSpeed(), 15, 15)
                    wallsCollidedExact = player.checkCollision(self.wallGroup)
                    if wallsCollidedExact:
                        # Walked into a wall: undo the move.
                        player.updateWH(self.IMAGES["right"], "H",
                                        -player.getSpeed(), 15, 15)

                if event.key == self.actions["left"]:
                    if self.newGame.direction != 3:
                        self.newGame.direction = 3
                        self.newGame.cycles = -1  # Reset cycles
                    self.newGame.cycles = (self.newGame.cycles + 1) % 4
                    if self.newGame.cycles < 2:
                        frame = self.IMAGES["left"]
                    else:
                        frame = self.IMAGES["left2"]
                    player.updateWH(frame, "H", -player.getSpeed(), 15, 15)
                    wallsCollidedExact = player.checkCollision(self.wallGroup)
                    if wallsCollidedExact:
                        # Walked into a wall: undo the move.
                        player.updateWH(self.IMAGES["left"], "H",
                                        player.getSpeed(), 15, 15)

                # On a ladder we can climb up...
                if event.key == self.actions["up"] and player.onLadder:
                    player.updateWH(self.IMAGES["still"], "V",
                                    -player.getSpeed() / 2, 15, 15)
                    # Undo if we left the ladder or ended up inside a wall.
                    if len(player.checkCollision(self.ladderGroup)) == 0 or \
                            len(player.checkCollision(self.wallGroup)) != 0:
                        player.updateWH(self.IMAGES["still"], "V",
                                        player.getSpeed() / 2, 15, 15)

                # ...and climb down.
                if event.key == self.actions["down"] and player.onLadder:
                    player.updateWH(self.IMAGES["still"], "V",
                                    player.getSpeed() / 2, 15, 15)

        # Update the player's position and process his jump if any.
        player.continuousUpdate(self.wallGroup, self.ladderGroup)

        # Redraw all our instances onto the screen.
        self.newGame.redrawScreen(self.screen, self.width, self.height)

        # Move fireballs and check collisions with the player.
        self.newGame.fireballCheck()

        # Collect coins the player touches (removing them from the group).
        coinsCollected = pygame.sprite.spritecollide(
            player, self.coinGroup, True)
        self.newGame.coinCheck(coinsCollected)

        # Check if we have reached the princess.
        self.newGame.checkVictory()

        # Update all the monsters.
        for enemy in self.newGame.Enemies:
            enemy.continuousUpdate(self.wallGroup, self.ladderGroup)
| 9,882 | 41.78355 | 104 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/doom/doom.py | import os
from ..base.doomwrapper import DoomWrapper
class Doom(DoomWrapper):
    '''Thin wrapper selecting a ViZDoom scenario by name.

    The .cfg file is resolved relative to this package's assets/cfg
    directory; the matching .wad is resolved by the base class.
    '''

    def __init__(self, scenario="basic"):
        package_directory = os.path.dirname(os.path.abspath(__file__))
        cfg_file = os.path.join(package_directory,
                                "assets/cfg/%s.cfg" % scenario)
        scenario_file = "%s.wad" % scenario
        # Fixed 320x240 screen, as in the original configuration.
        DoomWrapper.__init__(self, 320, 240, cfg_file, scenario_file)
| 506 | 28.823529 | 70 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/doom/__init__.py | from .doom import Doom
| 23 | 11 | 22 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/base/__init__.py | from .pygamewrapper import PyGameWrapper
try:
    from .doomwrapper import DoomWrapper
except Exception:
    # ViZDoom (doom_py) is an optional dependency; without it the rest of
    # the package should still import cleanly. Narrowed from a bare
    # `except:` so SystemExit/KeyboardInterrupt are no longer swallowed.
    print("couldn't import doomish")
| 132 | 21.166667 | 40 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/base/doomwrapper.py | import sys
import time
import numpy as np
import pygame
try:
#ty @ gdb & ppaquette
import doom_py
import doom_py.vizdoom as vizdoom
except ImportError:
raise ImportError("Please install doom_py.")
class DoomWrapper(object):
    '''Adapter exposing ViZDoom (via doom_py) through the PLE game API.

    Mirrors the PyGameWrapper interface: init/reset, step, getScore,
    game_over, screen access, reward adjustment and rng handling.
    '''

    def __init__(self, width, height, cfg_file, scenario_file):
        self.doom_game = doom_py.DoomGame()
        self._loader = doom_py.Loader()
        # It makes the most sense to keep cfg and wad files together,
        # which is why they are shipped as a pair.
        self.cfg_file = cfg_file
        self.scenario_file = self._loader.get_scenario_path(scenario_file)
        self.freedom_file = self._loader.get_freedoom_path()
        self.vizdoom_file = self._loader.get_vizdoom_path()

        self.state = None
        self.num_actions = 0
        self.action = None
        self.NOOP = [0] * 40  # all-zero (do nothing) action vector

        self.height = height
        self.width = width
        self.screen_dim = (width, height)
        self.allowed_fps = None
        self.rng = None

        self._window = DoomWindow(width, height)

    def _setup(self):
        self.doom_game.set_screen_format(vizdoom.ScreenFormat.BGR24)
        # Load the cfg and point doom at the bundled binaries/scenario.
        self.doom_game.load_config(self.cfg_file)
        self.doom_game.set_vizdoom_path(self.vizdoom_file)
        self.doom_game.set_doom_game_path(self.freedom_file)
        self.doom_game.set_doom_scenario_path(self.scenario_file)
        self.doom_game.set_window_visible(False)  # we use our own window
        self.doom_game.init()

        self.num_actions = self.doom_game.get_available_buttons_size()
        # One one-hot action vector per available button.
        self.actions = []
        for idx in range(self.num_actions):
            one_hot = [0] * self.num_actions
            one_hot[idx] = 1
            self.actions.append(one_hot)

    def _setAction(self, action, last_action):
        # Remember the action; it is performed on the next step() call.
        self.action = action

    def _draw_frame(self, draw_screen):
        if draw_screen:
            self._window.show_frame(self.getScreenRGB())

    def setRNG(self, rng):
        '''ViZDoom only takes an integer seed, not an rng object.'''
        if not isinstance(rng, int):
            raise ValueError("ViZDoom needs an int passed as rng")
        self.rng = rng
        self.doom_game.set_seed(rng)

    def getScreenRGB(self):
        # NOTE(review): the screen format is BGR24 (see _setup), so the
        # channel order is BGR despite this method's name.
        return self.state.image_buffer.copy()

    def tick(self, fps):
        # Sleep a bit here (argument to sleep is in seconds).
        time.sleep(1.0 / fps)
        return fps

    def adjustRewards(self, rewards):
        if "tick" in rewards:
            self.doom_game.set_living_reward(rewards["tick"])
        if "loss" in rewards:
            self.doom_game.set_death_penalty(rewards["loss"])

    def getGameState(self):
        return self.doom_game.get_state().game_variables

    def getScreenDims(self):
        return self.screen_dim

    def getActions(self):
        return self.actions

    def init(self):
        self.action = None
        self.doom_game.new_episode()
        self.state = self.doom_game.get_state()

    def reset(self):
        self.init()

    def getScore(self):
        return self.doom_game.get_total_reward()

    def game_over(self):
        return self.doom_game.is_episode_finished()

    def _handle_window_events(self):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.doom_game.close()  # doom quit
                pygame.quit()  # close window
                sys.exit()  # close game

    def step(self, dt):
        self._handle_window_events()
        self.state = self.doom_game.get_state()
        if self.action is None:
            self.doom_game.make_action(self.NOOP)
        else:
            self.doom_game.make_action(self.action)
class DoomWindow(object):
    '''Minimal pygame window used to display ViZDoom frames.'''

    def __init__(self, width, height):
        self.width = width
        self.height = height
        pygame.init()
        self.window = pygame.display.set_mode(
            (self.width, self.height), pygame.DOUBLEBUF, 24)
        pygame.display.set_caption("PLE ViZDoom")

    def show_frame(self, frame):
        # The incoming frame is (HEIGHT, WIDTH, 3); pygame blits
        # (WIDTH, HEIGHT, 3), hence the axis roll.
        frame = np.rollaxis(frame, 0, 2)
        pygame.surfarray.blit_array(self.window, frame)
        pygame.display.update()
| 4,175 | 27.60274 | 96 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/base/pygamewrapper.py | import pygame
import numpy as np
from pygame.constants import KEYDOWN, KEYUP, K_F15
class PyGameWrapper(object):
    """PyGameWrapper class

    ple.games.base.PyGameWrapper(width, height, actions={})

    This :class:`PyGameWrapper` class sets methods all games require. It
    should be subclassed when creating new games.

    Parameters
    ----------
    width: int
        The width of the game screen.

    height: int
        The height of the game screen.

    actions: dict
        Contains possible actions that the game responds too. The dict keys
        are used by the game, while the values are `pygame.constants`
        referring the keys.

        Possible actions dict:

        >>> from pygame.constants import K_w, K_s
        >>> actions = {
        >>>     "up": K_w,
        >>>     "down": K_s
        >>> }
    """

    def __init__(self, width, height, actions=None):
        # BUG FIX: the default used to be the mutable literal `actions={}`,
        # which is shared between every instance constructed without the
        # argument; a subclass mutating self.actions would leak into others.
        if actions is None:
            actions = {}

        # Required fields
        self.actions = actions  # holds actions

        self.score = 0.0  # required.
        self.lives = 0  # required. Can be 0 or -1 if not required.
        self.screen = None  # must be set to None
        self.clock = None  # must be set to None
        self.height = height
        self.width = width
        self.screen_dim = (width, height)  # width and height
        self.allowed_fps = None  # fps that the game is allowed to run at.
        self.NOOP = K_F15  # the noop key
        self.rng = None

        self.rewards = {
            "positive": 1.0,
            "negative": -1.0,
            "tick": 0,
            "loss": -5.0,
            "win": 5.0
        }

    def _setup(self):
        """
        Setups up the pygame env, the display and game clock.
        """
        pygame.init()
        self.screen = pygame.display.set_mode(self.getScreenDims(), 0, 32)
        self.clock = pygame.time.Clock()

    def _setAction(self, action, last_action):
        """
        Pushes the action to the pygame event queue: a KEYDOWN for the new
        action and a KEYUP releasing the previous one.
        """
        if action is None:
            action = self.NOOP

        if last_action is None:
            last_action = self.NOOP

        kd = pygame.event.Event(KEYDOWN, {"key": action})
        ku = pygame.event.Event(KEYUP, {"key": last_action})

        pygame.event.post(kd)
        pygame.event.post(ku)

    def _draw_frame(self, draw_screen):
        """
        Decides if the screen will be drawn too
        """
        # Idiomatic truth test (was `draw_screen == True`).
        if draw_screen:
            pygame.display.update()

    def getScreenRGB(self):
        """
        Returns the current game screen in RGB format.

        Returns
        --------
        numpy uint8 array
            Returns a numpy array with the shape (width, height, 3).
        """
        return pygame.surfarray.array3d(
            pygame.display.get_surface()).astype(np.uint8)

    def tick(self, fps):
        """
        This sleeps the game to ensure it runs at the desired fps.
        """
        return self.clock.tick_busy_loop(fps)

    def adjustRewards(self, rewards):
        """
        Adjusts the rewards the game gives the agent

        Parameters
        ----------
        rewards : dict
            A dictonary of reward events to float rewards. Only updates if
            key matches those specificed in the init function.
        """
        for key in rewards.keys():
            if key in self.rewards:
                self.rewards[key] = rewards[key]

    def setRNG(self, rng):
        """
        Sets the rng for games. Only assigns if no rng has been set yet.
        """
        if self.rng is None:
            self.rng = rng

    def getGameState(self):
        """
        Gets a non-visual state representation of the game.

        Returns
        -------
        dict or None
            dict if the game supports it and None otherwise.
        """
        return None

    def getScreenDims(self):
        """
        Gets the screen dimensions of the game in tuple form.

        Returns
        -------
        tuple of int
            Returns tuple as follows (width, height).
        """
        return self.screen_dim

    def getActions(self):
        """
        Gets the actions used within the game.

        Returns
        -------
        dict-values view of `pygame.constants`
        """
        return self.actions.values()

    def init(self):
        """
        This is used to initialize the game, such reseting the score, lives,
        and player position.

        This is game dependent.
        """
        raise NotImplementedError("Please override this method")

    def reset(self):
        """
        Wraps the init() function, can be setup to reset certain poritions
        of the game only if needed.
        """
        self.init()

    def getScore(self):
        """
        Return the current score of the game.

        Returns
        -------
        int
            The current reward the agent has received since the last init()
            or reset() call.
        """
        raise NotImplementedError("Please override this method")

    def game_over(self):
        """
        Gets the status of the game, returns True if game has hit a terminal
        state. False otherwise.

        This is game dependent.

        Returns
        -------
        bool
        """
        raise NotImplementedError("Please override this method")

    def step(self, dt):
        """
        This method steps the game forward one step in time equal to the dt
        parameter. The game does not run unless this method is called.

        Parameters
        ----------
        dt : integer
            This is the amount of time elapsed since the last frame in
            milliseconds.
        """
        raise NotImplementedError("Please override this method")
| 5,629 | 24.825688 | 157 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/utils/vec2d.py | import math
class vec2d():
    '''A minimal 2D vector supporting addition, equality and in-place
    normalization.'''

    def __init__(self, pos):
        self.x, self.y = pos[0], pos[1]

    def __add__(self, o):
        return vec2d((self.x + o.x, self.y + o.y))

    def __eq__(self, o):
        return (self.x, self.y) == (o.x, o.y)

    def normalize(self):
        '''Scale in place to unit length (raises ZeroDivisionError for the
        zero vector).'''
        norm = math.sqrt(self.x * self.x + self.y * self.y)
        self.x /= norm
        self.y /= norm
| 419 | 17.26087 | 59 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/utils/__init__.py | import numpy as np
def percent_round_int(percent, x):
    '''Return percent * x rounded to the nearest int (numpy's
    round-half-to-even rounding).'''
    scaled = np.round(percent * x)
    return scaled.astype(int)
| 101 | 16 | 44 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/flappybird/__init__.py | import os
import sys
import numpy as np
import pygame
from pygame.constants import K_w
from .. import base
class BirdPlayer(pygame.sprite.Sprite):
    '''The flappy bird sprite: handles flapping, gravity and the 3-frame
    wing animation.'''

    def __init__(self,
                 SCREEN_WIDTH, SCREEN_HEIGHT, init_pos,
                 image_assets, rng, color="red", scale=1.0):

        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.image_order = [0, 1, 2, 1]

        pygame.sprite.Sprite.__init__(self)
        self.image_assets = image_assets

        self.init(init_pos, color)

        self.height = self.image.get_height()
        self.scale = scale

        # Vertical physics constants (everything is in terms of y).
        self.vel = 0
        self.FLAP_POWER = 9 * self.scale
        self.MAX_DROP_SPEED = 10.0
        self.GRAVITY = 1.0 * self.scale

        self.rng = rng

        self._oscillateStartPos()  # randomizes the start position
        self.rect.center = (self.pos_x, self.pos_y)  # could be done better

    def init(self, init_pos, color):
        '''Reset animation/physics state and place the bird at init_pos.'''
        self.flapped = True  # start off with a flap
        self.current_image = 0
        self.color = color
        self.image = self.image_assets[self.color][self.current_image]
        self.rect = self.image.get_rect()
        self.thrust_time = 0.0
        self.game_tick = 0
        self.pos_x, self.pos_y = init_pos[0], init_pos[1]

    def _oscillateStartPos(self):
        # Shift the start height by a random offset in [0, 8].
        self.pos_y += 8 * np.sin(self.rng.rand() * np.pi)

    def flap(self):
        # Ignore flaps once the bird is far above the top of the screen.
        if self.pos_y > -2.0 * self.image.get_height():
            self.vel = 0.0
            self.flapped = True

    def update(self, dt):
        self.game_tick += 1

        # Advance the wing animation every 15 ticks.
        if (self.game_tick + 1) % 15 == 0:
            self.current_image += 1
            if self.current_image >= 3:
                self.current_image = 0
            # Set the image to draw with.
            self.image = self.image_assets[self.color][self.current_image]
            self.rect = self.image.get_rect()

        # Gravity, capped at the maximum drop speed; suspended while a
        # flap's thrust is still being applied.
        if self.vel < self.MAX_DROP_SPEED and self.thrust_time == 0.0:
            self.vel += self.GRAVITY

        # Spread the flap power over the same wall time it takes at 30fps.
        if self.thrust_time + dt <= (1.0 / 30.0) and self.flapped:
            self.thrust_time += dt
            self.vel += -1.0 * self.FLAP_POWER
        else:
            self.thrust_time = 0.0
            self.flapped = False

        self.pos_y += self.vel
        self.rect.center = (self.pos_x, self.pos_y)

    def draw(self, screen):
        screen.blit(self.image, self.rect.center)
class Pipe(pygame.sprite.Sprite):
    """A single pipe obstacle: upper and lower pipe drawn onto one
    screen-tall surface with a vertical gap, scrolling right-to-left."""
    def __init__(self,
                 SCREEN_WIDTH, SCREEN_HEIGHT, gap_start, gap_size, image_assets, scale,
                 offset=0, color="green"):
        self.speed = 4.0 * scale
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.image_assets = image_assets
        # done image stuff
        self.width = self.image_assets["green"]["lower"].get_width()
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((self.width, self.SCREEN_HEIGHT))
        self.image.set_colorkey((0, 0, 0))  # black is treated as transparent
        self.init(gap_start, gap_size, offset, color)
    def init(self, gap_start, gap_size, offset, color):
        # (re)draw the pipe pair; also used to recycle pipes that left the screen
        self.image.fill((0, 0, 0))
        self.gap_start = gap_start  # y coordinate of the top of the gap
        # spawn just past the right edge of the screen (plus optional offset)
        self.x = self.SCREEN_WIDTH + self.width + offset
        self.lower_pipe = self.image_assets[color]["lower"]
        self.upper_pipe = self.image_assets[color]["upper"]
        top_bottom = gap_start - self.upper_pipe.get_height()
        bottom_top = gap_start + gap_size
        self.image.blit(self.upper_pipe, (0, top_bottom))
        self.image.blit(self.lower_pipe, (0, bottom_top))
        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.SCREEN_HEIGHT / 2)
    def update(self, dt):
        # scroll left at a fixed speed; dt is accepted but unused here
        self.x -= self.speed
        self.rect.center = (self.x, self.SCREEN_HEIGHT / 2)
class Backdrop():
    """Static background plus a horizontally scrolling ground strip."""
    def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT,
                 image_background, image_base, scale):
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.background_image = image_background
        self.base_image = image_base
        # horizontal offset of the ground strip; scrolls left then wraps
        self.x = 0
        self.speed = 4.0 * scale
        # slack: how far the (wider) ground image may slide before wrapping
        self.max_move = self.base_image.get_width() - self.background_image.get_width()
    def update_draw_base(self, screen, dt):
        # slide the ground left until the extra width on the right is used up,
        # then snap back to the start so the scroll appears continuous
        if self.x <= -1 * self.max_move:
            self.x = 0
        else:
            self.x -= self.speed
        screen.blit(self.base_image, (self.x, self.SCREEN_HEIGHT * 0.79))
    def draw_background(self, screen):
        # static backdrop anchored at the top-left corner
        screen.blit(self.background_image, (0, 0))
class FlappyBird(base.PyGameWrapper):
    """
    Used physics values from sourabhv's `clone`_.
    .. _clone: https://github.com/sourabhv/FlapPyBird
    Parameters
    ----------
    width : int (default: 288)
        Screen width. Consistent gameplay is not promised for different widths or heights, therefore the width and height should not be altered.
    height : int (default: 512)
        Screen height.
    pipe_gap : int (default: 100)
        The gap in pixels left between the top and bottom pipes.
    """
    def __init__(self, width=288, height=512, pipe_gap=100):
        actions = {
            "up": K_w
        }
        fps = 30
        base.PyGameWrapper.__init__(self, width, height, actions=actions)
        self.scale = 30.0 / fps
        self.allowed_fps = 30  # restrict the fps
        self.pipe_gap = pipe_gap
        self.pipe_color = "red"
        self.images = {}
        # a display surface must exist before images can be converted,
        # so we can preload images
        pygame.display.set_mode((1, 1), pygame.NOFRAME)
        self._dir_ = os.path.dirname(os.path.abspath(__file__))
        self._asset_dir = os.path.join(self._dir_, "assets/")
        self._load_images()
        # starting x offsets of the three recycled pipes
        self.pipe_offsets = [0, self.width * 0.5, self.width]
        self.init_pos = (
            int(self.width * 0.2),
            int(self.height / 2)
        )
        # allowed y range for the top of a pipe gap (0.79*height is the ground)
        self.pipe_min = int(self.pipe_gap / 4)
        self.pipe_max = int(self.height * 0.79 * 0.6 - self.pipe_gap / 2)
        # created lazily in init() so they can be reused across resets
        self.backdrop = None
        self.player = None
        self.pipe_group = None
    def _load_images(self):
        # preload and convert all the images so its faster when we reset
        self.images["player"] = {}
        for c in ["red", "blue", "yellow"]:
            image_assets = [
                os.path.join(self._asset_dir, "%sbird-upflap.png" % c),
                os.path.join(self._asset_dir, "%sbird-midflap.png" % c),
                os.path.join(self._asset_dir, "%sbird-downflap.png" % c),
            ]
            self.images["player"][c] = [pygame.image.load(
                im).convert_alpha() for im in image_assets]
        self.images["background"] = {}
        for b in ["day", "night"]:
            path = os.path.join(self._asset_dir, "background-%s.png" % b)
            self.images["background"][b] = pygame.image.load(path).convert()
        self.images["pipes"] = {}
        for c in ["red", "green"]:
            path = os.path.join(self._asset_dir, "pipe-%s.png" % c)
            self.images["pipes"][c] = {}
            self.images["pipes"][c]["lower"] = pygame.image.load(
                path).convert_alpha()
            # the upper pipe is the lower pipe rotated upside down
            self.images["pipes"][c]["upper"] = pygame.transform.rotate(
                self.images["pipes"][c]["lower"], 180)
        path = os.path.join(self._asset_dir, "base.png")
        self.images["base"] = pygame.image.load(path).convert()
    def init(self):
        # lazily construct the game objects on first call; later resets
        # re-init the existing objects instead of recreating them
        if self.backdrop is None:
            self.backdrop = Backdrop(
                self.width,
                self.height,
                self.images["background"]["day"],
                self.images["base"],
                self.scale
            )
        if self.player is None:
            self.player = BirdPlayer(
                self.width,
                self.height,
                self.init_pos,
                self.images["player"],
                self.rng,
                color="red",
                scale=self.scale
            )
        if self.pipe_group is None:
            self.pipe_group = pygame.sprite.Group([
                self._generatePipes(offset=-75),
                self._generatePipes(offset=-75 + self.width / 2),
                self._generatePipes(offset=-75 + self.width * 1.5)
            ])
        # randomize cosmetics (background / bird / pipe colors) each episode
        color = self.rng.choice(["day", "night"])
        self.backdrop.background_image = self.images["background"][color]
        # instead of recreating
        color = self.rng.choice(["red", "blue", "yellow"])
        self.player.init(self.init_pos, color)
        self.pipe_color = self.rng.choice(["red", "green"])
        for i, p in enumerate(self.pipe_group):
            self._generatePipes(offset=self.pipe_offsets[i], pipe=p)
        self.score = 0.0
        self.lives = 1
        self.game_tick = 0
    def getGameState(self):
        """
        Gets a non-visual state representation of the game.
        Returns
        -------
        dict
            * player y position.
            * players velocity.
            * next pipe distance to player
            * next pipe top y position
            * next pipe bottom y position
            * next next pipe distance to player
            * next next pipe top y position
            * next next pipe bottom y position
        See code for structure.
        """
        pipes = []
        # keep only pipes whose center is still ahead of the player, paired
        # with their horizontal distance to the player
        for p in self.pipe_group:
            if p.x + p.width/2 > self.player.pos_x :
                pipes.append((p, p.x + p.width/2 - self.player.pos_x ))
        pipes.sort(key=lambda p: p[1])
        # NOTE(review): indices look swapped (pipes[0] is nearest after the
        # sort), but the x comparison below swaps them back, so the net result
        # is correct. Assumes at least two pipes remain ahead of the player.
        next_pipe = pipes[1][0]
        next_next_pipe = pipes[0][0]
        if next_next_pipe.x < next_pipe.x:
            next_pipe, next_next_pipe = next_next_pipe, next_pipe
        state = {
            "player_y": self.player.pos_y,
            "player_vel": self.player.vel,
            "next_pipe_dist_to_player": next_pipe.x + next_pipe.width/2 - self.player.pos_x ,
            "next_pipe_top_y": next_pipe.gap_start,
            "next_pipe_bottom_y": next_pipe.gap_start + self.pipe_gap,
            "next_next_pipe_dist_to_player": next_next_pipe.x + next_next_pipe.width/2 - self.player.pos_x ,
            "next_next_pipe_top_y": next_next_pipe.gap_start,
            "next_next_pipe_bottom_y": next_next_pipe.gap_start + self.pipe_gap
        }
        return state
    def getScore(self):
        # accumulated reward for the current episode
        return self.score
    def _generatePipes(self, offset=0, pipe=None):
        # pick a random y for the top of the gap; re-init `pipe` when given
        # (recycling), otherwise build and return a fresh Pipe.
        # NOTE(review): rng.random_integers is deprecated in modern numpy —
        # rng.randint(low, high + 1) is the replacement; verify numpy version.
        start_gap = self.rng.random_integers(
            self.pipe_min,
            self.pipe_max
        )
        if pipe is None:
            pipe = Pipe(
                self.width,
                self.height,
                start_gap,
                self.pipe_gap,
                self.images["pipes"],
                self.scale,
                color=self.pipe_color,
                offset=offset
            )
            return pipe
        else:
            pipe.init(start_gap, self.pipe_gap, offset, self.pipe_color)
    def _handle_player_events(self):
        # drain the pygame event queue; only the "up" action triggers a flap
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions['up']:
                    self.player.flap()
    def game_over(self):
        return self.lives <= 0
    def step(self, dt):
        self.game_tick += 1
        dt = dt / 1000.0
        self.score += self.rewards["tick"]
        # handle player movement
        self._handle_player_events()
        for p in self.pipe_group:
            # NOTE(review): spritecollide is recomputed against the whole
            # group on every iteration of this loop — works, but redundant.
            hit = pygame.sprite.spritecollide(
                self.player, self.pipe_group, False)
            is_in_pipe = (p.x - p.width/2 - 20) <= self.player.pos_x < (p.x + p.width/2)
            for h in hit:  # do check to see if its within the gap.
                top_pipe_check = (
                    (self.player.pos_y - self.player.height/2 + 12) <= h.gap_start) and is_in_pipe
                bot_pipe_check = (
                    (self.player.pos_y +
                     self.player.height) > h.gap_start +
                    self.pipe_gap) and is_in_pipe
                if top_pipe_check:
                    self.lives -= 1
                if bot_pipe_check:
                    self.lives -= 1
            # is it past the player? (narrow 4px window grants the reward once)
            if (p.x - p.width / 2) <= self.player.pos_x < (p.x - p.width / 2 + 4):
                self.score += self.rewards["positive"]
            # is it out of the screen? recycle it on the right
            if p.x < -p.width:
                self._generatePipes(offset=self.width * 0.2, pipe=p)
        # fell on the ground
        if self.player.pos_y >= 0.79 * self.height - self.player.height:
            self.lives -= 1
        # went above the screen
        if self.player.pos_y <= 0:
            self.lives -= 1
        self.player.update(dt)
        self.pipe_group.update(dt)
        if self.lives <= 0:
            self.score += self.rewards["loss"]
        # draw everything back-to-front
        self.backdrop.draw_background(self.screen)
        self.pipe_group.draw(self.screen)
        self.backdrop.update_draw_base(self.screen, dt)
        self.player.draw(self.screen)
| 13,508 | 29.632653 | 144 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/example_doom.py | import numpy as np
from ple import PLE
from ple.games import Doom
class NaiveAgent():
    """A baseline agent that ignores its observations and acts uniformly
    at random."""
    def __init__(self, actions):
        # the fixed set of actions we may sample from
        self.actions = actions
    def pickAction(self, reward, obs):
        # reward and obs are accepted for interface compatibility but unused
        choice = np.random.randint(0, len(self.actions))
        return self.actions[choice]
###################################
# Demo: run a random agent on the Doom "take_cover" scenario for 15000 frames.
game = Doom(scenario="take_cover")
env = PLE(game)
agent = NaiveAgent(env.getActionSet())
env.init()
reward = 0.0
for f in range(15000):
    # if the game is over, start a fresh episode
    if env.game_over():
        env.reset_game()
    action = agent.pickAction(reward, env.getScreenRGB())
    reward = env.act(action)
    # after 2000 frames, show the screen at human-watchable speed...
    if f > 2000:
        env.display_screen = True
        env.force_fps = False
    # ...and after 2250 frames keep it visible but run at full speed again
    if f > 2250:
        env.display_screen = True
        env.force_fps = True
| 867 | 21.842105 | 62 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/keras_nonvis.py | # thanks to @edersantana and @fchollet for suggestions & help.
import numpy as np
from ple import PLE # our environment
from ple.games.catcher import Catcher
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
from example_support import ExampleAgent, ReplayMemory, loop_play_forever
class Agent(ExampleAgent):
    """
    Our agent takes 1D inputs which are flattened.
    We define a fully connected model below.
    """
    def __init__(self, *args, **kwargs):
        ExampleAgent.__init__(self, *args, **kwargs)
        self.state_dim = self.env.getGameStateDims()
        # Use the attributes set by ExampleAgent.__init__ instead of relying
        # on module-level globals `num_frames`/`batch_size` being defined
        # (the original only worked when run as the __main__ script).
        self.state_shape = np.prod((self.num_frames,) + self.state_dim)
        self.input_shape = (self.batch_size, self.state_shape)
    def build_model(self):
        # simple MLP: flattened state stack -> Q-value per action
        model = Sequential()
        model.add(Dense(
            input_dim=self.state_shape, output_dim=256, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            512, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            self.num_actions, activation="linear", init="he_uniform"
        ))
        model.compile(loss=self.q_loss, optimizer=SGD(lr=self.lr))
        self.model = model
def nv_state_preprocessor(state):
    """
    This preprocesses our state from PLE: each of the 4 state values is
    rescaled by its maximum magnitude, so entries end up in [0, 1] / [-1, 1].
    Returns a flat 1D numpy array.
    """
    # taken by inspection of source code. Better way is on its way!
    max_values = np.array([128.0, 20.0, 128.0, 128.0])
    # list(...) makes this work on Python 3 as well: np.array over a py3
    # dict_values view would build a 1-element object array instead of a
    # (1, 4) float array.
    state = np.array([list(state.values())]) / max_values
    return state.flatten()
if __name__ == "__main__":
# this takes about 15 epochs to converge to something that performs decently.
# feel free to play with the parameters below.
# training parameters
num_epochs = 15
num_steps_train = 15000 # steps per epoch of training
num_steps_test = 3000
update_frequency = 4 # step frequency of model training/updates
# agent settings
batch_size = 32
num_frames = 4 # number of frames in a 'state'
frame_skip = 2
# percentage of time we perform a random action, help exploration.
epsilon = 0.15
epsilon_steps = 30000 # decay steps
epsilon_min = 0.1
lr = 0.01
discount = 0.95 # discount factor
rng = np.random.RandomState(24)
# memory settings
max_memory_size = 100000
min_memory_size = 1000 # number needed before model training starts
epsilon_rate = (epsilon - epsilon_min) / epsilon_steps
# PLE takes our game and the state_preprocessor. It will process the state
# for our agent.
game = Catcher(width=128, height=128)
env = PLE(game, fps=60, state_preprocessor=nv_state_preprocessor)
agent = Agent(env, batch_size, num_frames, frame_skip, lr,
discount, rng, optimizer="sgd_nesterov")
agent.build_model()
memory = ReplayMemory(max_memory_size, min_memory_size)
env.init()
for epoch in range(1, num_epochs + 1):
steps, num_episodes = 0, 0
losses, rewards = [], []
env.display_screen = False
# training loop
while steps < num_steps_train:
episode_reward = 0.0
agent.start_episode()
while env.game_over() == False and steps < num_steps_train:
state = env.getGameState()
reward, action = agent.act(state, epsilon=epsilon)
memory.add([state, action, reward, env.game_over()])
if steps % update_frequency == 0:
loss = memory.train_agent_batch(agent)
if loss is not None:
losses.append(loss)
epsilon = np.max(epsilon_min, epsilon - epsilon_rate)
episode_reward += reward
steps += 1
if num_episodes % 5 == 0:
print "Episode {:01d}: Reward {:0.1f}".format(num_episodes, episode_reward)
rewards.append(episode_reward)
num_episodes += 1
agent.end_episode()
print "\nTrain Epoch {:02d}: Epsilon {:0.4f} | Avg. Loss {:0.3f} | Avg. Reward {:0.3f}".format(epoch, epsilon, np.mean(losses), np.sum(rewards) / num_episodes)
steps, num_episodes = 0, 0
losses, rewards = [], []
# display the screen
env.display_screen = True
# slow it down so we can watch it fail!
env.force_fps = False
# testing loop
while steps < num_steps_test:
episode_reward = 0.0
agent.start_episode()
while env.game_over() == False and steps < num_steps_test:
state = env.getGameState()
reward, action = agent.act(state, epsilon=0.05)
episode_reward += reward
steps += 1
# done watching after 500 steps.
if steps > 500:
env.force_fps = True
env.display_screen = False
if num_episodes % 5 == 0:
print "Episode {:01d}: Reward {:0.1f}".format(num_episodes, episode_reward)
rewards.append(episode_reward)
num_episodes += 1
agent.end_episode()
print "Test Epoch {:02d}: Best Reward {:0.3f} | Avg. Reward {:0.3f}".format(epoch, np.max(rewards), np.sum(rewards) / num_episodes)
print "\nTraining complete. Will loop forever playing!"
loop_play_forever(env, agent)
| 5,449 | 31.634731 | 167 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/example_support.py | import numpy as np
from collections import deque
# keras and model related
from keras.models import Sequential
from keras.layers.core import Dense, Flatten
from keras.layers.convolutional import Convolution2D
from keras.optimizers import SGD, Adam, RMSprop
import theano.tensor as T
class ExampleAgent():
    """
    Implements a DQN-ish agent. It has replay memory and epsilon decay. It is missing model freezing. The models are sensitive to the parameters and if applied to other games must be tinkered with.
    """
    def __init__(self, env, batch_size, num_frames,
                 frame_skip, lr, discount, rng, optimizer="adam", frame_dim=None):
        self.env = env
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.frame_skip = frame_skip
        self.lr = lr
        self.discount = discount
        self.rng = rng
        # map the optimizer name to a configured Keras optimizer
        if optimizer == "adam":
            opt = Adam(lr=self.lr)
        elif optimizer == "sgd":
            opt = SGD(lr=self.lr)
        elif optimizer == "sgd_nesterov":
            opt = SGD(lr=self.lr, nesterov=True)
        elif optimizer == "rmsprop":
            opt = RMSprop(lr=self.lr, rho=0.9, epsilon=0.003)
        else:
            raise ValueError("Unrecognized optmizer")
        self.optimizer = opt
        self.frame_dim = self.env.getScreenDims() if frame_dim is None else frame_dim
        # a "state" is a stack of the last num_frames screens
        self.state_shape = (num_frames,) + self.frame_dim
        self.input_shape = (batch_size,) + self.state_shape
        self.state = deque(maxlen=num_frames)
        self.actions = self.env.getActionSet()
        self.num_actions = len(self.actions)
        self.model = None  # built later via build_model()
    def q_loss(self, y_true, y_pred):
        # clipped quadratic/linear (Huber-style) loss:
        # assume clip_delta is 1.0
        # along with sum accumulator.
        diff = y_true - y_pred
        _quad = T.minimum(abs(diff), 1.0)
        _lin = abs(diff) - _quad
        loss = 0.5 * _quad ** 2 + _lin
        loss = T.sum(loss)
        return loss
    def build_model(self):
        # conv net over the stacked frames -> one Q-value per action
        model = Sequential()
        model.add(Convolution2D(
            16, 8, 8, input_shape=(self.num_frames,) + self.frame_dim,
            subsample=(4, 4), activation="relu", init="he_uniform"
        ))
        model.add(Convolution2D(
            16, 4, 4, subsample=(2, 2), activation="relu", init="he_uniform"
        ))
        model.add(Convolution2D(
            32, 3, 3, subsample=(1, 1), activation="relu", init="he_uniform"
        ))
        model.add(Flatten())
        model.add(Dense(
            512, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            self.num_actions, activation="linear", init="he_uniform"
        ))
        model.compile(loss=self.q_loss, optimizer=self.optimizer)
        self.model = model
    def predict_single(self, state):
        """
        model is expecting a batch_size worth of data. We only have one states worth of
        samples so we make an empty batch and set our state as the first row.
        """
        states = np.zeros(self.input_shape)
        states[0, ...] = state.reshape(self.state_shape)
        return self.model.predict(states)[0]  # only want the first value
    def _argmax_rand(self, arr):
        # picks a random index if there is a tie
        return self.rng.choice(np.where(arr == np.max(arr))[0])
    def _best_action(self, state):
        q_vals = self.predict_single(state)
        return self._argmax_rand(q_vals)  # the action with the best Q-value
    def act(self, state, epsilon=1.0):
        # epsilon-greedy action selection with frame skipping
        self.state.append(state)
        action = self.rng.randint(0, self.num_actions)
        if len(self.state) == self.num_frames:  # only exploit once we have seen enough frames
            _state = np.array(self.state)
            if self.rng.rand() > epsilon:
                action = self._best_action(_state)  # exploit
        reward = 0.0
        for i in range(self.frame_skip):  # we repeat each action a few times
            # act on the environment
            reward += self.env.act(self.actions[action])
        reward = np.clip(reward, -1.0, 1.0)
        return reward, action
    def start_episode(self, N=3):
        # reset the env and take a few NOOPs so starts are not all identical
        self.env.reset_game()  # reset
        for i in range(self.rng.randint(N)):
            self.env.act(self.env.NOOP)  # perform a NOOP
    def end_episode(self):
        self.state.clear()
class ReplayMemory():
    """FIFO experience-replay buffer of [state, action, reward, terminal]
    transitions; serves random training batches once min_size is reached."""
    def __init__(self, max_size, min_size):
        self.min_replay_size = min_size
        self.memory = deque(maxlen=max_size)  # oldest transitions evicted automatically
    def __len__(self):
        return len(self.memory)
    def add(self, transition):
        # transition is [state, action, reward, terminal]
        self.memory.append(transition)
    def train_agent_batch(self, agent):
        # returns the batch loss, or None while the buffer is still warming up
        if len(self.memory) > self.min_replay_size:
            states, targets = self._random_batch(agent)  # get a random batch
            return agent.model.train_on_batch(states, targets)  # ERR?
        else:
            return None
    def _random_batch(self, agent):
        # sample batch_size distinct windows of num_frames consecutive
        # transitions and build Q-learning targets for them
        inputs = np.zeros(agent.input_shape)
        targets = np.zeros((agent.batch_size, agent.num_actions))
        seen = []
        idx = agent.rng.randint(
            0,
            high=len(
                self.memory) -
            agent.num_frames -
            1)
        for i in range(agent.batch_size):
            while idx in seen:
                idx = agent.rng.randint(0, high=len(
                    self.memory) - agent.num_frames - 1)
            # num_frames+1 stored states: the first num_frames form the input
            # state, the last num_frames form the successor state
            states = np.array([self.memory[idx + j][0]
                               for j in range(agent.num_frames + 1)])
            art = np.array([self.memory[idx + j][1:]
                            for j in range(agent.num_frames)])
            actions = art[:, 0].astype(int)
            rewards = art[:, 1]
            terminals = art[:, 2]
            state = states[:-1]
            state_next = states[1:]
            inputs[i, ...] = state.reshape(agent.state_shape)
            # we could make zeros but pointless.
            targets[i] = agent.predict_single(state)
            Q_prime = np.max(agent.predict_single(state_next))
            # NOTE(review): `actions`/`rewards`/`terminals` are per-frame
            # arrays, so this fancy-indexed assignment updates several action
            # entries of the row at once — confirm this is intended.
            targets[i, actions] = rewards + \
                (1 - terminals) * (agent.discount * Q_prime)
            seen.append(idx)
        return inputs, targets
def loop_play_forever(env, agent):
    """Play episodes forever (greedy with 5% exploration) until Ctrl-C."""
    # our forever play loop
    try:
        # slow it down so a human can watch
        env.display_screen = True
        env.force_fps = False
        while True:
            agent.start_episode()
            episode_reward = 0.0
            while env.game_over() == False:
                state = env.getGameState()
                reward, action = agent.act(state, epsilon=0.05)
                episode_reward += reward
            print "Agent score {:0.1f} reward for episode.".format(episode_reward)
            agent.end_episode()
    except KeyboardInterrupt:
        print "Exiting out!"
| 6,844 | 30.837209 | 201 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/random_agent.py | import numpy as np
from ple import PLE
from ple.games.raycastmaze import RaycastMaze
class NaiveAgent():
    """A naive agent: every call returns one of its actions uniformly
    at random."""
    def __init__(self, actions):
        self.actions = actions
    def pickAction(self, reward, obs):
        # neither the reward nor the observation influences the choice
        n = len(self.actions)
        return self.actions[np.random.randint(0, n)]
###################################
# Demo: run a random agent on the RaycastMaze game for nb_frames steps,
# periodically saving a screenshot.
game = RaycastMaze(
    map_size=6
)  # create our game
fps = 30  # fps we want to run at
frame_skip = 2
num_steps = 1
force_fps = False  # slower speed so the game is watchable
display_screen = True
reward = 0.0
max_noops = 20
nb_frames = 15000
# make a PLE instance.
p = PLE(game, fps=fps, frame_skip=frame_skip, num_steps=num_steps,
        force_fps=force_fps, display_screen=display_screen)
# our Naive agent!
agent = NaiveAgent(p.getActionSet())
# init agent and game.
p.init()
# lets do a random number of NOOP's so starts are not all identical
for i in range(np.random.randint(0, max_noops)):
    reward = p.act(p.NOOP)
# start our training loop
for f in range(nb_frames):
    # if the game is over, start a fresh episode
    if p.game_over():
        p.reset_game()
    obs = p.getScreenRGB()
    action = agent.pickAction(reward, obs)
    reward = p.act(action)
    # snapshot the screen every 50 frames
    if f % 50 == 0:
        p.saveScreen("screen_capture.png")
| 1,263 | 20.793103 | 68 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/scaling_rewards.py | import numpy as np
from ple import PLE
from ple.games.waterworld import WaterWorld
# lets adjust the rewards our agent receives
rewards = {
    "tick": -0.01,  # each time the game steps forward in time the agent gets -0.01
    "positive": 1.0,  # each time the agent collects a green circle
    "negative": -5.0,  # each time the agent bumps into a red circle
}
# make a PLE instance.
# use lower fps so we can see what's happening a little easier
game = WaterWorld(width=256, height=256, num_creeps=8)
p = PLE(game, fps=15, force_fps=False, display_screen=True,
        reward_values=rewards)
# we pass in the rewards and PLE will adjust the game for us
p.init()
actions = p.getActionSet()
for i in range(1000):
    if p.game_over():
        p.reset_game()
    action = actions[np.random.randint(0, len(actions))]  # random actions
    reward = p.act(action)
    print "Score: {:0.3f} | Reward: {:0.3f} ".format(p.score(), reward)
| 937 | 30.266667 | 82 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/tests/test_ple_doom.py | #!/usr/bin/python
import nose
import nose
import numpy as np
import unittest
class NaiveAgent():
    """Test helper: picks one of its actions uniformly at random."""
    def __init__(self, actions):
        self.actions = actions
    def pickAction(self, reward, obs):
        # reward and obs are ignored on purpose
        index = np.random.randint(0, len(self.actions))
        return self.actions[index]
class MyTestCase(unittest.TestCase):
    """Smoke test: the Doom game can be wrapped in PLE and stepped."""
    def run_a_game(self,game):
        # drive 50 random-action steps to make sure nothing crashes
        from ple import PLE
        p = PLE(game)
        agent = NaiveAgent(p.getActionSet())
        p.init()
        reward = p.act(p.NOOP)
        for i in range(50):
            obs = p.getScreenRGB()
            reward = p.act(agent.pickAction(reward,obs))
    def test_doom(self):
        from ple.games.doom import Doom
        game = Doom()
        self.run_a_game(game)
if __name__ == "__main__":
    # allow running this test module directly via nose
    nose.runmodule()
| 779 | 18.5 | 68 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/tests/test_ple.py | #!/usr/bin/python
"""
This tests that all the PLE games launch, except for doom; we
explicitly check that it isn't defined.
"""
import nose
import numpy as np
import unittest
NUM_STEPS=150
class NaiveAgent():
    """Test helper agent: samples one of its actions uniformly at random."""
    def __init__(self, actions):
        self.actions = actions
    def pickAction(self, reward, obs):
        # the reward/obs arguments are part of the agent interface but unused
        n = len(self.actions)
        return self.actions[np.random.randint(0, n)]
class MyTestCase(unittest.TestCase):
    """Smoke tests: every bundled PLE game can be created and stepped with
    random actions for NUM_STEPS frames without crashing."""
    def run_a_game(self,game):
        # shared driver: wrap the game in PLE and play NUM_STEPS random steps
        from ple import PLE
        p = PLE(game,display_screen=True)
        agent = NaiveAgent(p.getActionSet())
        p.init()
        reward = p.act(p.NOOP)
        for i in range(NUM_STEPS):
            obs = p.getScreenRGB()
            reward = p.act(agent.pickAction(reward,obs))
    def test_catcher(self):
        from ple.games.catcher import Catcher
        game = Catcher()
        self.run_a_game(game)
    def test_monsterkong(self):
        from ple.games.monsterkong import MonsterKong
        game = MonsterKong()
        self.run_a_game(game)
    def test_flappybird(self):
        from ple.games.flappybird import FlappyBird
        game = FlappyBird()
        self.run_a_game(game)
    def test_pixelcopter(self):
        from ple.games.pixelcopter import Pixelcopter
        game = Pixelcopter()
        self.run_a_game(game)
    def test_puckworld(self):
        from ple.games.puckworld import PuckWorld
        game = PuckWorld()
        self.run_a_game(game)
    def test_raycastmaze(self):
        from ple.games.raycastmaze import RaycastMaze
        game = RaycastMaze()
        self.run_a_game(game)
    def test_snake(self):
        from ple.games.snake import Snake
        game = Snake()
        self.run_a_game(game)
    def test_waterworld(self):
        from ple.games.waterworld import WaterWorld
        game = WaterWorld()
        self.run_a_game(game)
    def test_pong(self):
        from ple.games.pong import Pong
        game = Pong()
        self.run_a_game(game)
    def test_doom_not_defined(self):
        # doom is optional; referencing its wrapper must raise NameError here
        from nose.tools import assert_raises
        def invoke_doom():
            DoomWrapper
        assert_raises(NameError,invoke_doom)
if __name__ == "__main__":
    # allow running this test module directly via nose
    nose.runmodule()
| 2,211 | 23.043478 | 68 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/docs/conf.py | import sys
import os
from mock import Mock
# stub out pygame so ple can be imported on machines without it (e.g. RTD)
sys.modules['pygame'] = Mock()
sys.modules['pygame.constants'] = Mock()
# so we can import ple
sys.path.append(os.path.join(os.path.dirname(__name__), ".."))
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'numpydoc'
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'PyGame Learning Environment'
copyright = u'2016, Norman Tasfi'
author = u'Norman Tasfi'
import ple
version = u'0.1.dev1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.dev1'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
# theme auto-detection borrowed from Lasagne's conf.py
if os.environ.get('READTHEDOCS') != 'True':
    try:
        import sphinx_rtd_theme
    except ImportError:
        pass  # assume we have sphinx >= 1.3
    else:
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
        html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'PyGameLearningEnvironmentdoc'
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PyGameLearningEnvironment.tex', u'PyGame Learning Environment Documentation',
     u'Norman Tasfi', 'manual'),
]
man_pages = [
    (master_doc, 'pygamelearningenvironment', u'PyGame Learning Environment Documentation',
     [author], 1)
]
texinfo_documents = [
    (master_doc, 'PyGameLearningEnvironment', u'PyGame Learning Environment Documentation',
     author, 'PyGameLearningEnvironment', 'RL for all.',
     'Miscellaneous'),
]
| 1,940 | 22.962963 | 95 | py |
Intraclass-clustering-measures | Intraclass-clustering-measures-main/kendal_coefficient.py | import math as m
import numpy as np
np.random.seed(1)
def kendall_coeff(metric_values,test_performances):
    """Kendall rank correlation between a metric and test performance.
    Averages sign(m1 - m2) * sign(t1 - t2) over all ordered pairs of
    observations whose (metric, performance) tuples differ. Note that
    copysign(1, 0) is +1, so a tie on a single coordinate counts as a
    positive sign for that coordinate.
    """
    observations = list(zip(metric_values, test_performances))
    total = 0
    pairs = 0
    for m1, t1 in observations:
        for m2, t2 in observations:
            if (m1, t1) == (m2, t2):
                continue
            total += m.copysign(1, m1 - m2) * m.copysign(1, t1 - t2)
            pairs += 1
    return total / pairs
def granulated_kendall_coeff(metric_values,test_performances,params):
    """Granulated Kendall coefficient, one value per hyperparameter axis.
    For each hyperparameter, runs are grouped so that a group shares the same
    value for every OTHER hyperparameter; the plain Kendall coefficient is
    computed inside each group of size > 1 and the group coefficients are
    averaged. Returns {hyperparameter name: averaged coefficient}.
    `params` is a list of dicts, all expected to have identical keys.
    """
    nb_hyperparams = len(params[0].keys())
    granulated_coeffs = {}
    # loop over hyperparameter axes
    for hyperparam in params[0].keys():
        params_seen = [0 for i in range(len(params))]  # 1 once a run was grouped
        coeffs = []
        # NOTE(review): params_seen is mutated while being iterated through
        # zip; skipping already-grouped runs via seen1/seen2 relies on
        # Python 3's lazy zip observing those updates — verify if Python 2
        # support is intended (py2's eager zip would not see them).
        for i,(m1,t1,param1,seen1) in enumerate(zip(metric_values,test_performances,params,params_seen)):
            if seen1==0:
                # collect runs with the same hyperparameter values on all but the current hyperparam
                metric_values_set = [m1]
                test_performances_set = [t1]
                params_seen[i]=1
                for j,(m2,t2,param2,seen2) in enumerate(zip(metric_values,test_performances,params,params_seen)):
                    if seen2==0:
                        # check hyperparameter equivalence
                        check_sum = 0
                        for key,value in param2.items():
                            if key!=hyperparam and param1[key]==value:
                                check_sum+=1
                        if check_sum==nb_hyperparams-1:
                            metric_values_set.append(m2)
                            test_performances_set.append(t2)
                            params_seen[j]=1
                if len(metric_values_set)>1:
                    coeffs.append(kendall_coeff(metric_values_set,test_performances_set))
        # NOTE(review): raises ZeroDivisionError if no group had size > 1
        granulated_coeffs.update({hyperparam:sum(coeffs)/len(coeffs)})
    return granulated_coeffs
| 1,999 | 39.816327 | 113 | py |
Intraclass-clustering-measures | Intraclass-clustering-measures-main/measures.py | '''
Measures of intraclass clustering ability and generalization
'''
import sys
sys.path.insert(0, "../")
import warnings
import numpy as np
from scipy.spatial.distance import cosine
from sklearn.metrics import silhouette_score, silhouette_samples, calinski_harabasz_score
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from keras import losses
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Activation
from keras.constraints import Constraint
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler, Callback
from keras.engine.training_arrays import predict_loop, test_loop
from keras.preprocessing.image import ImageDataGenerator
from utils_training import history_todict, lr_schedule
def model_extract_tensors(model,input_data,tensors,batch_size=128, training_phase = 0):
    """Evaluate arbitrary backend `tensors` of `model` over `input_data`
    in batches. `training_phase` is fed to K.learning_phase()
    (0 = test mode, 1 = train mode).
    """
    input_tensors = [model.inputs[0], # input data
                     K.learning_phase()] # train or test mode
    f = K.function(inputs=input_tensors, outputs=tensors)
    # last element of inputs is not sliced in batches thanks to keras :)
    inputs = [input_data, training_phase]
    outputs = predict_loop(model,f, inputs, batch_size = batch_size, verbose = 0)
    return outputs
def collect_activations(model,x,batch_size = 128,training_phase = 0,preact=False):
    """Return a list of per-layer activations (or pre-activations when
    preact=True) for every ReLU/Activation layer of `model`, evaluated on
    inputs `x`. 4D conv feature maps are reduced with a global max over
    the spatial axes so every entry is (n_samples, n_units).
    """
    # collect activation layers
    relu_outputs = []
    for layer in model.layers[:-1]:
        if ('relu' in layer.name) or isinstance(layer,Activation) or isinstance(layer,ControllableReLU):
            representation = layer.input if preact else layer.output
            if len(layer.input_shape)==2:
                relu_outputs.append(representation)
            elif len(layer.input_shape)==4:
                relu_outputs.append(K.max(representation,axis = [1,2])) # global max pooling over H,W
    # extract relu activations
    activations = model_extract_tensors(model,x,relu_outputs,batch_size=batch_size,training_phase = training_phase)
    # normalize to a list even when only one tensor was requested
    if not isinstance(activations,list):
        activations = [activations]
    return activations
def evaluate_in_training_mode(model,x,y,sample_weights = None, batch_size = 128, verbose = 0):
    """Evaluate `model` on (x, y) with the backend learning phase forced to 1
    (training mode), e.g. so dropout/batch-norm behave as during training.
    Returns whatever Keras' test_loop returns (loss and metrics).
    """
    # BUGFIX: use `is None` rather than `== None` — comparing a numpy array
    # to None with == broadcasts elementwise, so passing explicit sample
    # weights made the `if` raise "truth value is ambiguous".
    if sample_weights is None:
        sample_weights = np.ones((x.shape[0],),np.float32)
    ins = [x, y, sample_weights, 1]  # trailing 1 = learning phase (train mode)
    model._make_test_function()
    f = model.test_function
    return test_loop(model, f, ins,
                     batch_size=batch_size,
                     verbose=verbose)
def blackbox_subclass(model,x,y,suby, batch_size = 128,training_phase = 0,data_subset = 1.,agg = 'max'):
    '''
    measure c_0: black-box intraclass clustering measure.
    For each subclass, prediction degradation along linear interpolations
    between pairs of samples inside the subclass is compared against the
    degradation along interpolations toward other samples of the same class;
    the measure is the median over subclasses of that ratio.
    y / suby are one-hot class / subclass label matrices.
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y,suby = x[subset],y[subset],suby[subset]
    # class - subclass correspondence (nbclasses,nbsubclasses)
    # 1: all samples from a subclass are in a given class, 0: no samples from a subclass are in a given class
    correspondence = np.dot(y.T,suby)/suby.sum(axis = 0)
    metric_per_subclass = []
    for subclass_index in range(suby.shape[1]):
        # the class this subclass predominantly belongs to
        class_index = np.argmax(correspondence[:,subclass_index])
        samples_subclass = suby[:,subclass_index].astype(bool)
        # selects samples from the class to which the subclass belongs
        samples_class = y[:,class_index]
        # remove samples from the subclass
        samples_class = (samples_class-samples_class*samples_subclass).astype(bool)
        x_subclass = x[samples_subclass]
        x_subclass_shuffled = x_subclass[np.random.permutation(len(x_subclass))]
        x_class = x[samples_class]
        # match the subclass size so the two pairings are comparable
        x_class = x_class[np.random.permutation(len(x_class))[:len(x_subclass)]]
        scores = []
        # first pairing: within-subclass; second pairing: subclass vs rest of class
        for x1,x2 in [(x_subclass,x_subclass_shuffled),(x_subclass,x_class)]:
            interpolation_factors = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
            outs = []
            for factor in interpolation_factors:
                # interpolate between samples from x1 and x2
                preds = model.predict(x1*factor + x2*(1-factor), batch_size=batch_size)
                # For each interpolation point, record prediction on the correct class
                outs.append(preds[:,class_index])
            if agg == 'max':
                # for each pair of samples, compute maximum deviation from perfect prediction along the interpolation points
                # average over all pairs of samples
                scores.append(np.mean(np.max(1-np.array(outs),axis = 0)))
            elif agg == 'sum':
                # alternative: accumulate deviation over all interpolation points
                scores.append(np.mean(np.sum(1-np.array(outs),axis = 0)))
            else:
                raise ValueError('agg argument wrongly specified. Should be either max or sum.')
        # compare results for pairs of samples inside the same subclass, versus
        # pairs of samples from different subclasses (but still in the same class)
        metric_per_subclass.append(scores[1]/scores[0])
    return np.median(np.array(metric_per_subclass))
def neural_subclass_selectivity(model,x,y,suby, batch_size = 128,training_phase = 0,
                    layerwise = False,subclass_agg='median', neuron_agg ='max',data_subset = 1.,preact=True):
    '''
    measure c_1: neuron-level subclass selectivity.

    For each subclass, compares each neuron's mean activation on the subclass
    against its mean activation on the rest of the parent class (normalized by
    the pooled std), aggregates over neurons (``neuron_agg``: 'max' or 'topk')
    and then over subclasses (``subclass_agg``: 'mean', 'median' or 'max').

    Returns a 1-element array (or one value per layer when ``layerwise``).
    Raises ValueError on an unrecognized ``neuron_agg`` / ``subclass_agg``
    (previously an unknown value silently returned an intermediate array).
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y,suby = x[subset],y[subset],suby[subset]
    # collect activations
    activations = collect_activations(model,x,batch_size,training_phase,preact=preact)
    # class - subclass correspondence (nbclasses,nbsubclasses)
    # 1: all samples from a subclass are in a given class, 0: no samples from a subclass are in a given class
    correspondence = np.dot(y.T,suby)/suby.sum(axis = 0)
    subclass_selectivity = []
    for subclass in range(suby.shape[1]):
        samples_subclass = suby[:,subclass].astype(bool)
        # selects samples from the class to which the subclass belongs
        samples_class = y[:,np.argmax(correspondence[:,subclass])]
        # remove samples from the subclass
        samples_class = (samples_class-samples_class*suby[:,subclass]).astype(bool)
        subclass_selectivity_neurons = []
        for layer,acts in enumerate(activations):
            mean_subclass = np.mean(acts[samples_subclass],axis = 0)
            std_subclass = np.std(acts[samples_subclass],axis = 0)
            mean_class = np.mean(acts[samples_class],axis = 0)
            std_class = np.std(acts[samples_class],axis = 0)
            neuron_selectivity = (mean_subclass-mean_class) / (std_subclass + std_class+1e-7)
            # ignore dead neurons (never positive on any sample)
            neuron_selectivity = neuron_selectivity*(1-np.all(acts<0.,axis = 0))
            subclass_selectivity_neurons.append(neuron_selectivity)
        if not layerwise:
            # concatenate neurons of all layers
            subclass_selectivity_neurons = [np.concatenate(subclass_selectivity_neurons)]
        if neuron_agg == 'max':
            # max over neurons
            subclass_selectivity.append([np.max(l) for l in subclass_selectivity_neurons])
        elif neuron_agg == 'topk':
            # mean of topk neurons. k is such that (nb_neurons/nb_subclasses) neurons are selected
            subclass_selectivity.append([])
            for l in subclass_selectivity_neurons:
                k = max(round(len(l)/suby.shape[1]),1) # k should be at least 1
                subclass_selectivity[-1].append( np.mean(np.partition(l,-k)[-k:]) ) # mean of top k
        else:
            raise ValueError('neuron_agg argument wrongly specified. Should be either max or topk.')
    # dimensions should be (nb_subclasses, nb_layers) if layerwise or (nb_subclasses,1) if not layerwise
    subclass_selectivity = np.array(subclass_selectivity)
    if subclass_agg == 'mean':
        selectivity = np.mean(subclass_selectivity,axis=0)
    elif subclass_agg == 'median':
        selectivity = np.median(subclass_selectivity,axis=0)
    elif subclass_agg == 'max':
        selectivity = np.max(subclass_selectivity,axis=0)
    else:
        raise ValueError('subclass_agg argument wrongly specified. Should be mean, median or max.')
    return selectivity
def layer_subclass_clustering(model,x,y,suby, batch_size = 128,training_phase = 0, data_subset = 1.,layerwise = False,subclass_agg='median',preact=False):
    '''
    measure c_2: layer-level subclass clustering.

    For every layer, computes per-sample silhouette scores (cosine metric) of
    the subclass partition inside each class, averages them per subclass, then
    aggregates: max over layers (unless ``layerwise``) and ``subclass_agg``
    ('mean'/'median'/'max') over subclasses. Any other ``subclass_agg`` value
    returns the un-aggregated per-subclass scores.
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y,suby = x[subset],y[subset],suby[subset]
    # collect activations
    activations = collect_activations(model,x,batch_size,training_phase,preact=preact)
    if preact:
        # pre-activations are standardized and thresholded so that cosine
        # distances behave like on post-ReLU activations
        for i,act in enumerate(activations):
            # epsilon avoids NaN columns for constant (dead) neurons,
            # consistent with layer_intraclass_clustering
            act = (act-np.mean(act,axis=0)) / (np.std(act,axis = 0)+1e-7)
            # percentile is computed such that at least 10 neurons are activated by each sample in each layer on average
            percentile = min(round(100-100*10/act.shape[1]) , 75)
            thres = np.percentile(act,percentile,axis = 0,keepdims = True)
            activations[i] = np.maximum(act-thres,0)
    # class - subclass correspondence (nbclasses,nbsubclasses)
    # 1: all samples from a subclass are in a given class, 0: no samples from a subclass are in a given class
    correspondence = np.dot(y.T,suby)/suby.sum(axis = 0)
    subclass_clustering_per_layer = []
    for layer,acts in enumerate(activations):
        subclass_clustering_per_layer.append([])
        for c in range(y.shape[1]):
            samples_class = y[:,c].astype(bool)
            # provides a silhouette score per sample
            score = silhouette_samples(acts[samples_class],
                                       np.where(suby[samples_class][:,(correspondence[c]>0.).astype(bool)])[1],
                                       metric='cosine')
            for subclass in np.where(correspondence[c]>0.)[0]:
                # compute mean silhouette score for each subclass
                subclass_clustering_per_layer[-1].append(np.mean(score[suby[samples_class][:,subclass].astype(bool)]))
    # dimensions should be (nb_layers, nb_subclasses)
    subclass_clustering_per_layer = np.array(subclass_clustering_per_layer)
    if not layerwise:
        # max over layers
        subclass_clustering = np.max(subclass_clustering_per_layer,axis = 0)
    else:
        subclass_clustering = subclass_clustering_per_layer
    if subclass_agg == 'mean':
        subclass_clustering = np.mean(subclass_clustering,axis = -1)
    elif subclass_agg == 'median':
        subclass_clustering = np.median(subclass_clustering,axis = -1)
    elif subclass_agg == 'max':
        subclass_clustering = np.max(subclass_clustering,axis = -1)
    return subclass_clustering
def neural_intraclass_selectivity(model,x,y,batch_size = 128,training_phase = 0, data_subset = 1.,layerwise = False,subclass_agg='mean',preact=True, k_neuron=None,not_all = False):
    '''
    measure c_3: neuron-level intra-class selectivity.

    For each class, measures how much each neuron's activation varies inside
    the class relative to its variation on the reference set (all samples, or
    the complement of the class when ``not_all``), takes the mean of the top-k
    neurons, then aggregates over classes with ``subclass_agg``.

    Raises ValueError on an unrecognized ``subclass_agg`` (previously an
    unknown value silently returned an intermediate per-neuron array).
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y = x[subset],y[subset]
    activations = collect_activations(model,x,batch_size=batch_size,training_phase=training_phase,preact=preact)
    # pre-compute neuron-wise std on the data
    stds_all = []
    for layer,acts in enumerate(activations):
        stds_all.append(np.std(acts,axis = 0))
    # compute neural selectivity for each layer
    subclass_selectivity = []
    for c in range(y.shape[1]):
        # selects samples from the class
        samples_class = y[:,c].astype(bool)
        subclass_selectivity_layer = []
        for layer,acts in enumerate(activations):
            std_class = np.std(acts[samples_class],axis = 0)
            if not_all:
                std_all = np.std(acts[(1-samples_class).astype(bool)],axis = 0)
            else:
                std_all = stds_all[layer]
            neuron_selectivity = std_class / (std_all+1e-7)
            # ignore dead neurons (never positive on any sample)
            neuron_selectivity = neuron_selectivity*(1-np.all(acts<0.,axis = 0))
            subclass_selectivity_layer.append(neuron_selectivity)
        if not layerwise:
            # concatenate neurons of all layers
            subclass_selectivity_layer = [np.concatenate(subclass_selectivity_layer)]
        # mean of topk neurons.
        subclass_selectivity.append([])
        for l in subclass_selectivity_layer:
            if k_neuron is None:
                # k_neuron is such that (nb_neurons/nb_classes) neurons are selected
                # NOTE(review): once computed here (from the first layer seen),
                # k_neuron is reused for all remaining layers and classes
                k_neuron = max(round(len(l)/y.shape[1]),1) # k should be at least 1
            subclass_selectivity[-1].append( np.mean(np.partition(l,-k_neuron)[-k_neuron:]) ) # mean of top k
    # dimensions should be (nb_classes, nb_layers) if layerwise or (nb_classes,1) if not layerwise
    subclass_selectivity = np.array(subclass_selectivity)
    if subclass_agg == 'mean':
        selectivity = np.mean(subclass_selectivity,axis=0)
    elif subclass_agg == 'median':
        selectivity = np.median(subclass_selectivity,axis=0)
    elif subclass_agg == 'max':
        selectivity = np.max(subclass_selectivity,axis=0)
    else:
        raise ValueError('subclass_agg argument wrongly specified. Should be mean, median or max.')
    return selectivity
def layer_intraclass_clustering(model,x,y,batch_size = 128,training_phase = 0, data_subset = 1.,layerwise = False,subclass_agg='mean',preact=True,k_layer = 1):
    '''
    measure c_4: layer-level intra-class clustering.

    Compares the spread of pairwise cosine distances inside each class against
    the spread over a random subsample of the data (sized by ``data_subset``),
    per layer; aggregates with the mean of the top ``k_layer`` layers (unless
    ``layerwise``) and ``subclass_agg`` over classes.

    Fix: the original mutated ``activations`` (``del`` / indexed writes) while
    iterating it with ``enumerate``, which skips the layer following a deleted
    one and shifts the write indices. The processed layers are now collected
    into a fresh list instead.
    '''
    # NOTE: data_subset only subsamples the global distance estimate below;
    # the data itself is intentionally not subsampled (see commented-out lines
    # in the original history).
    subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
    # collect activations
    activations = collect_activations(model,x,batch_size=batch_size,training_phase=training_phase,preact=preact)
    processed = []
    for act in activations:
        # ignore dead neurons
        act = act[:,~np.all(act<0.,axis = 0)]
        act = (act-np.mean(act,axis=0)) / (np.std(act,axis = 0)+1e-7)
        if act.shape[1]!=0:
            # percentile such that ~10 neurons are activated per sample on average
            percentile = max(min(round(100-100*10/act.shape[1]) , 75),0)
            thres = np.percentile(act,percentile,axis = 0,keepdims = True)
            processed.append(np.maximum(act-thres,0))
        # layers containing only dead neurons are dropped entirely
    activations = processed
    subclass_clustering_per_layer = []
    for layer,acts in enumerate(activations):
        dists = cosine_distances(acts[subset])
        std_all = np.std(dists)
        subclass_clustering_per_layer.append([])
        for c in range(y.shape[1]):
            samples_class = y[:,c].astype(bool)
            dists = cosine_distances(acts[samples_class])
            std_class = np.std(dists)
            selectivity = std_class / (std_all+1e-7)
            subclass_clustering_per_layer[-1].append(selectivity)
    # dimensions should be (nb_layers, nb_classes)
    subclass_clustering_per_layer = np.array(subclass_clustering_per_layer)
    if not layerwise:
        # mean over topk layers
        subclass_clustering = np.mean(np.sort(subclass_clustering_per_layer,axis = 0)[-k_layer:,:],axis=0)
    else:
        subclass_clustering = subclass_clustering_per_layer
    if subclass_agg == 'mean':
        subclass_clustering = np.mean(subclass_clustering,axis = -1)
    elif subclass_agg == 'median':
        subclass_clustering = np.median(subclass_clustering,axis = -1)
    elif subclass_agg == 'max':
        subclass_clustering = np.max(subclass_clustering,axis = -1)
    return subclass_clustering
def sharpness_random(model,x,y, data_subset = .1, epsilon_weight_scale = 1e-3, nb_samplings = 10,
                     kernel_only = False, training_phase = 0, batch_size = 300):
    '''
    Random-perturbation sharpness.

    Bisects over a relative perturbation magnitude m to find the largest m such
    that Gaussian weight noise of scale m*(|w|+epsilon_weight_scale) still keeps
    the model's accuracy near target_accuracy (0.9). Returns (m, accuracy gap at
    the chosen m). Lower m means a sharper minimum.

    training_phase=1 is useful to use batchstatistics with batchnorm. But be careful to remove dropout layers!
    code adapted from NeurIPS "predicting generalization in deep learning" competition starting kit
    https://competitions.codalab.org/competitions/25301
    '''
    # recompile with zero-lr SGD: we only need evaluate(), not training
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(0.),
                  metrics=['accuracy'])
    # collect trainable weights and their original values
    weights = model.trainable_weights
    weights_orig = [K.get_value(w) for w in weights]
    # m represents the bounds for the weights perturbation
    # m will be optimized such that optimizing within these bounds reaches the target deviate
    # for this optimization, h and l represent high and low tentative values of m and a bisectional method is used
    h, l = 2.0, 0.000000
    target_accuracy = 0.9
    for i in range(20): # loop to find perturbation scale
        m = (h + l) / 2. # m fixes the bounds for the weight perturbation
        accuracy = 0.
        for k in range(nb_samplings): # loop to estimate accuracy of perturbed model given a perturbation scale
            for w,w_orig in zip(weights,weights_orig):
                if not kernel_only or len(w_orig.shape)>1.: # kernels are assumed to be the only weights with more than one dimension
                    noisy = w_orig + np.random.normal(0.,scale = m, size=list(w_orig.shape)) * (np.abs(w_orig)+epsilon_weight_scale)
                    K.set_value(w,noisy)
            # use subset of the data to estimate accuracy (a different subset is used for every estimation)
            subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
            if training_phase==1:
                estimate_accuracy = evaluate_in_training_mode(model,x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            elif training_phase==0:
                estimate_accuracy = model.evaluate(x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            accuracy += estimate_accuracy
        accuracy /=nb_samplings
        # stop when the search interval collapsed or the target accuracy is hit
        if h - l < 1e-5 or abs(accuracy - target_accuracy) < 5e-3:
            break
        # bisection step: too much damage -> shrink upper bound, else raise lower bound
        if accuracy < target_accuracy:
            h = m
        else:
            l = m
    # reset original weight values
    for w,w_orig in zip(weights,weights_orig):
        K.set_value(w,w_orig)
    return m, accuracy - target_accuracy
class Clip(Constraint):
    """Element-wise weight clipping constraint.

    Upper and lower bounds are tensors with the same shape as the weights.
    """
    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def __call__(self, w):
        # clamp from below, then from above
        clipped_below = K.maximum(w, self.lower_bound)
        return K.minimum(clipped_below, self.upper_bound)
class StoppingCriteria(Callback):
    '''
    Callback that stops training before the announced number of epochs when
    the epoch's training accuracy drops to (or below) a given threshold.
    '''
    def __init__(self, accuracy):
        '''
        accuracy: threshold in [0, 1]; training stops once the reported
        'accuracy' metric is <= this value.
        '''
        super().__init__()
        self.acc = accuracy
    def on_epoch_end(self, epoch, logs=None):
        # logs may be None or lack 'accuracy' depending on compile metrics;
        # the original compared None <= float and crashed in that case.
        acc = (logs or {}).get('accuracy')
        if acc is not None and acc <= self.acc:
            self.model.stop_training = True
def sharpness_worstcase(model,x,y, data_subset = .1, epsilon_weight_scale = 1e-3,
                        kernel_only = False, training_phase = 0,
                        batch_size = 128, epochs = 5,lr = 1.,noise = False):
    '''
    Worst-case (adversarial) sharpness.

    Bisects over a relative bound m; for each m, gradient-ascends the loss with
    weights constrained to +/- m*(|w|+epsilon_weight_scale) around the original
    values, and checks whether accuracy can be pushed below target_accuracy
    (0.9). Returns (m, accuracy gap, training history dict).

    dropout layers should be removed!
    code adapted from NeurIPS "predicting generalization in deep learning" competition starting kit
    https://competitions.codalab.org/competitions/25301

    Fix: the final return statement was corrupted by stray text fused onto the
    line (a syntax error); restored to returning the history via history_todict.
    '''
    orig_weights = model.get_weights()
    # collect weights and their initial values
    # prepare upper and lower bounds
    weights = model.trainable_weights
    weights_orig = [K.get_value(w) for w in weights]
    weight_upper_bounds = [K.variable(K.get_value(w)) for w in weights]
    weight_lower_bounds = [K.variable(K.get_value(w)) for w in weights]
    for w,w_upper,w_lower in zip(weights, weight_upper_bounds, weight_lower_bounds):
        if w.constraint is not None: # keras allows only one constraint per weight
            warnings.warn("a weight constraint has been overwritten by the sharpness_worstcase() call")
        w._constraint = Clip(w_lower, w_upper)
    # increase the original loss (gradient ascent via negated cross-entropy)
    model.compile(loss = lambda y_true,y_pred: -losses.categorical_crossentropy(y_true,y_pred),
                  optimizer=SGD(lr),
                  metrics=['accuracy'])
    lr_sched = LearningRateScheduler(lr_schedule(lr,0.1,[3]))
    # m represents the bounds for the weights perturbation
    # m will be optimized such that optimizing within these bounds reaches the target deviate
    # for this optimization, h and l represent high and low tentative values of m and a bisectional method is used
    h, l = .25, 0.000000
    target_accuracy = 0.9
    stop = StoppingCriteria(0.7) # training will stop if train accuracy is below 70%
    for i in range(20): # loop to find perturbation scale
        m = (h + l) / 2. # m fixes the bounds for the weight perturbation
        nb_samplings = 3 if noise else 1
        min_accuracy = 1.
        for k in range(nb_samplings):
            for w,w_orig,w_upper,w_lower in zip(weights, weights_orig, weight_upper_bounds, weight_lower_bounds):
                if not kernel_only or len(w_orig.shape)>1.: # kernels are assumed to be the only weights with more than one dimension
                    if noise:
                        # add uniform noise to the kernels to accelerate training
                        noisy = w_orig+np.random.uniform(low=-m/2, high=m/2,
                                                         size=list(w_orig.shape)) * (np.abs(w_orig)+epsilon_weight_scale)
                        K.set_value(w,noisy)
                    else:
                        K.set_value(w,w_orig)
                    # set optimization constraints
                    K.set_value(w_lower,w_orig- m*(np.abs(w_orig)+epsilon_weight_scale))
                    K.set_value(w_upper,w_orig+ m*(np.abs(w_orig)+epsilon_weight_scale))
            # use subset of the data to train and estimate accuracy (a different subset is used for every estimation)
            datagen = ImageDataGenerator()
            history = model.fit_generator(datagen.flow(x, y,batch_size=batch_size),
                                          steps_per_epoch=50,#int(data_subset*x_train.shape[0]/batch_size),
                                          epochs=epochs,
                                          verbose = 0,
                                          callbacks = [lr_sched,stop])
            subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
            if training_phase == 1:
                # evaluation is in training mode (which is good, 'cause no need to update batchnorm running statistics)
                # but careful for dropout: should be disabled
                accuracy = history.history['accuracy'][-1]
            elif training_phase == 0:
                accuracy = model.evaluate(x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            min_accuracy = min(min_accuracy,accuracy) # only useful when noise = True
        accuracy = min_accuracy
        if h - l < 1e-5 or abs(accuracy - target_accuracy) < 5e-3:
            break
        # bisection step
        if accuracy < target_accuracy:
            h = m
        else:
            l = m
    # restore the model and remove the temporary clipping constraints
    model.set_weights(orig_weights)
    for w in weights:
        w._constraint = None
    return m, accuracy - target_accuracy, history_todict(history)
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import warnings
warnings.filterwarnings('ignore')
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from loss_scaler import NativeScaler as NativeScaler_new
from datasets import build_dataset
from engine import evaluate, train_one_epoch_diverse
from losses import DistillationLoss
from samplers import RASampler
import models
import utils
import torch.nn as nn
from mix import Mixup_diversity
def get_args_parser():
    """Build the DeiT/Diverse-ViT argument parser.

    Created with add_help=False so it can be composed as a parent parser
    (see the __main__ block). Fix: the --aa help string contained a broken
    leftover concatenation ('" + \\' fused into the literal) plus a stray
    trailing comma; the help text is repaired, all defaults unchanged.
    """
    parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
    #### Diversity Regularization ####
    parser.add_argument('--mixing_coef', type=float, default=0, help='')
    parser.add_argument('--emb_cos_within_coef', type=float, default=0, help='')
    parser.add_argument('--emb_contrast_cross_coef', type=float, default=0, help='')
    parser.add_argument('--attn_cos_within_coef', type=float, default=0, help='')
    parser.add_argument('--weight_mha_cond_orth_coef', type=float, default=0, help='')
    parser.add_argument('--batch-size', default=128, type=int)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--scale_lr_flag', action='store_false')
    # Model parameters
    parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=True)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine"')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')
    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)
    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')
    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # Distillation parameters
    parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "regnety_160"')
    parser.add_argument('--teacher-path', type=str, default='')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
    parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
    parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    # Dataset parameters
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
                        type=str, help='Image Net dataset path')
    parser.add_argument('--inat-category', default='name',
                        choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
                        type=str, help='semantic granularity')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--teacher_eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
                        help='')
    parser.set_defaults(pin_mem=True)
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def main(args):
    """Entry point: set up (distributed) data pipeline, model, optimizer and
    schedule from parsed args, then run training with diversity regularization
    and per-epoch evaluation/checkpointing."""
    utils.init_distributed_mode(args)
    print(args)
    if args.distillation_type != 'none' and args.finetune and not args.eval:
        raise NotImplementedError("Finetuning with distillation not yet supported")
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    # random.seed(seed)
    cudnn.benchmark = True
    # ---- datasets and samplers -------------------------------------------
    dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
    dataset_val, _ = build_dataset(is_train=False, args=args)
    if True:  # args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.repeated_aug:
            sampler_train = RASampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        else:
            sampler_train = torch.utils.data.DistributedSampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=int(1.5 * args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )
    # mixup/cutmix variant that also returns info used by the diversity losses
    mixup_fn = Mixup_diversity(
        mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob,
        label_smoothing=args.smoothing, num_classes=args.nb_classes)
    # ---- model ------------------------------------------------------------
    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path,
        drop_block_rate=None
    )
    if args.finetune:
        if args.finetune.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.finetune, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.finetune, map_location='cpu')
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # drop classifier heads whose shape differs from the target model
        for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
        # only the position tokens are interpolated
        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
        pos_tokens = torch.nn.functional.interpolate(
            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
        checkpoint_model['pos_embed'] = new_pos_embed
        model.load_state_dict(checkpoint_model, strict=False)
    model.to(device)
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(
            model,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume='')
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # ---- optimizer, schedule, losses --------------------------------------
    if args.scale_lr_flag:
        # linear lr scaling rule w.r.t. the global batch size
        linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
        args.lr = linear_scaled_lr
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()
    lr_scheduler, _ = create_scheduler(args, optimizer)
    # default is overwritten below depending on mixup/smoothing settings
    criterion = LabelSmoothingCrossEntropy()
    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    teacher_model = None
    if args.distillation_type != 'none':
        assert args.teacher_path, 'need to specify teacher-path when using distillation'
        print(f"Creating teacher model: {args.teacher_model}")
        # teacher_model = create_model(
        #     args.teacher_model,
        #     pretrained=False,
        #     num_classes=args.nb_classes,
        #     global_pool='avg',
        # )
        teacher_model = create_model(
            args.teacher_model,
            pretrained=False,
            num_classes=args.nb_classes,
            drop_rate=args.drop,
            drop_path_rate=args.drop_path,
            drop_block_rate=None,
        )
        if args.teacher_path.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.teacher_path, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.teacher_path, map_location='cpu')
        teacher_model.load_state_dict(checkpoint['model'])
        teacher_model.to(device)
        teacher_model.eval()
    # wrap the criterion in our custom DistillationLoss, which
    # just dispatches to the original criterion if args.distillation_type is 'none'
    criterion = DistillationLoss(
        criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
    )
    # ---- (optional) resume / eval-only ------------------------------------
    max_accuracy = 0.0
    output_dir = Path(args.output_dir)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        if 'max_acc' in checkpoint:
            max_accuracy = checkpoint['max_acc']
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
            if args.model_ema:
                utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
            if 'scaler' in checkpoint:
                loss_scaler.load_state_dict(checkpoint['scaler'])
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    if args.teacher_eval:
        test_stats = evaluate(data_loader_val, teacher_model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    # ---- training loop -----------------------------------------------------
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        print('Training with diversity regularization')
        train_stats = train_one_epoch_diverse(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, model_ema, mixup_fn,
            set_training_mode=args.finetune == '', args=args  # keep in eval mode during finetuning
        )
        lr_scheduler.step(epoch)
        # always refresh the rolling checkpoint
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'scaler': loss_scaler.state_dict(),
                    'args': args,
                    'max_acc': max_accuracy
                }, checkpoint_path)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        # keep a separate best-accuracy checkpoint
        if test_stats["acc1"] > max_accuracy:
            checkpoint_paths = [output_dir / 'checkpoint_best.pth']
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'scaler': loss_scaler.state_dict(),
                    'args': args,
                    'max_acc': max_accuracy
                }, checkpoint_path)
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # compose the CLI from get_args_parser() (which is built with add_help=False)
    parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        # make sure the checkpoint/log directory exists before training starts
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
    """Wrap a base criterion and blend in a knowledge-distillation term.

    The extra term supervises the student's distillation logits with the
    predictions of a frozen teacher model, either as soft (KL at temperature
    ``tau``) or hard (cross-entropy on the teacher's argmax) targets.
    """

    def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
                 distillation_type: str, alpha: float, tau: float):
        super().__init__()
        assert distillation_type in ['none', 'soft', 'hard']
        self.base_criterion = base_criterion
        self.teacher_model = teacher_model
        self.distillation_type = distillation_type
        self.alpha = alpha      # weight of the distillation term in the blend
        self.tau = tau          # softmax temperature for the 'soft' variant

    def forward(self, inputs, outputs, labels):
        """Compute ``(1 - alpha) * base + alpha * distillation``.

        Args:
            inputs: the original inputs that are fed to the teacher model.
            outputs: either a Tensor, or a Tuple[Tensor, Tensor] holding the
                regular output first and the distillation predictions second.
            labels: targets for the base criterion.
        """
        distill_logits = None
        if not isinstance(outputs, torch.Tensor):
            # Model returned a pair: [regular outputs, distillation outputs].
            outputs, distill_logits = outputs
        base_loss = self.base_criterion(outputs, labels)
        if self.distillation_type == 'none':
            return base_loss
        if distill_logits is None:
            raise ValueError("When knowledge distillation is enabled, the model is "
                             "expected to return a Tuple[Tensor, Tensor] with the output of the "
                             "class_token and the dist_token")
        # Teacher is frozen: no gradients flow through it.
        with torch.no_grad():
            teacher_logits = self.teacher_model(inputs)
        if self.distillation_type == 'soft':
            T = self.tau
            # KL between temperature-softened distributions; the T^2 factor
            # keeps gradient magnitudes comparable across temperatures.
            distillation_loss = F.kl_div(
                F.log_softmax(distill_logits / T, dim=1),
                F.log_softmax(teacher_logits / T, dim=1),
                reduction='sum',
                log_target=True
            ) * (T * T) / distill_logits.numel()
        elif self.distillation_type == 'hard':
            distillation_loss = F.cross_entropy(distill_logits, teacher_logits.argmax(dim=1))
        return base_loss * (1 - self.alpha) + distillation_loss * self.alpha
| 2,792 | 41.969231 | 114 | py |
Diverse-ViT | Diverse-ViT-main/engine.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import sys
import math
import utils
import torch
import torch.nn as nn
from timm.data import Mixup
from losses import DistillationLoss
from typing import Iterable, Optional
from timm.utils import accuracy, ModelEma
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from reg import *
def train_one_epoch_diverse(model: torch.nn.Module, criterion: DistillationLoss,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
                    set_training_mode=True, args=None):
    """Train `model` for one epoch with the base loss plus the optional
    diversity regularizers (Loss_* come from `reg` via star-import).

    In training mode the model's forward is expected to return a 4-tuple:
    (per-token outputs, first-layer embeddings, last-layer embeddings,
    first-block attention map); the regularizers consume the extra tensors.

    Args:
        model: network under training (see forward contract above).
        criterion: DistillationLoss; applied to the class token `outputs[:, 0]`.
        data_loader: iterable of (samples, targets) batches.
        optimizer: stepped indirectly through `loss_scaler` (AMP).
        device: device batches are moved to.
        epoch: current epoch index (used only for the log header).
        loss_scaler: callable running backward + (optionally clipped) step.
        max_norm: gradient-clipping norm forwarded to the scaler (0 disables).
        model_ema: optional EMA tracker updated after every step.
        mixup_fn: optional Mixup; NOTE(review): this variant appears to return
            an extra per-patch target tensor — confirm against the Mixup impl.
        set_training_mode: forwarded to model.train(); False keeps eval mode
            (used during finetuning, see caller).
        args: namespace with the regularizer coefficients.

    Returns:
        dict mapping meter name -> global average over the epoch.
    """
    model.train(set_training_mode)
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 50
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if mixup_fn is not None:
            # This Mixup variant also yields per-patch targets for Loss_mixing.
            samples, patch_targets, targets = mixup_fn(samples, targets)
            patch_targets = patch_targets.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            # Training forward: (token outputs, first-layer embeddings,
            # last-layer embeddings, first-block attention).
            outputs, h_first, h_last, attn_first = model(samples)
            # Base (possibly distillation) loss on the class token only.
            loss = criterion(samples, outputs[:,0], targets)
            if not args.mixing_coef == 0:
                # NOTE(review): the base loss is divided by patch_num as well —
                # confirm this normalization is intentional.
                loss_mix, patch_num = Loss_mixing(outputs, patch_targets)
                loss = (loss + loss_mix) / patch_num
            if not args.emb_cos_within_coef == 0:
                # Cosine-similarity penalty within last-layer embeddings.
                loss_diverse = Loss_cosine(h_last)
                loss += args.emb_cos_within_coef * loss_diverse
            if not args.emb_contrast_cross_coef == 0:
                # Contrastive penalty between first- and last-layer embeddings.
                loss_diverse = Loss_contrastive(h_first, h_last)
                loss += args.emb_contrast_cross_coef * loss_diverse
            if not args.attn_cos_within_coef == 0:
                # Cosine-similarity penalty on the first block's attention map.
                loss_diverse = Loss_cosine_attn(attn_first)
                loss += args.attn_cos_within_coef * loss_diverse
            if not args.weight_mha_cond_orth_coef == 0:
                loss_diverse = 0
                # Condition/orthogonality penalty applied separately to the
                # Q, K and V sub-matrices of each fused qkv projection weight.
                for pname, pweight in model.named_parameters():
                    if 'attn.qkv.weight' in pname:
                        dim = pweight.shape[-1]
                        new_weight = pweight.reshape(3, dim, dim)
                        qw, kw, vw = new_weight[0,:,:], new_weight[1,:,:], new_weight[2,:,:]
                        qloss = Loss_condition_orth_weight(qw)
                        loss_diverse += qloss
                        kloss = Loss_condition_orth_weight(kw)
                        loss_diverse += kloss
                        vloss = Loss_condition_orth_weight(vw)
                        loss_diverse += vloss
                loss += args.weight_mha_cond_orth_coef*loss_diverse
        loss_value = loss.item()
        if not math.isfinite(loss_value):
            # Unlike stock DeiT (which exits), a non-finite loss is replaced
            # with zeros via nan_to_num and training continues.
            print("* Loss is {}, skip current iteration".format(loss_value))
            loss = torch.nan_to_num(loss)
            loss_value = loss.item()
            # sys.exit(1)
        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=is_second_order)
        torch.cuda.synchronize()
        if model_ema is not None:
            model_ema.update(model)
        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate `model` on `data_loader`.

    Computes cross-entropy loss and top-1/top-5 accuracy per batch, averages
    them across all distributed processes, and returns the global averages.
    """
    xent = torch.nn.CrossEntropyLoss()
    logger = utils.MetricLogger(delimiter=" ")
    # Switch to evaluation mode (disables dropout / stochastic depth).
    model.eval()
    for samples, labels in logger.log_every(data_loader, 10, 'Test:'):
        samples = samples.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)
        # Forward under autocast, matching the training precision setup.
        with torch.cuda.amp.autocast():
            logits = model(samples)
            loss = xent(logits, labels)
        top1, top5 = accuracy(logits, labels, topk=(1, 5))
        n = samples.shape[0]
        logger.update(loss=loss.item())
        logger.meters['acc1'].update(top1.item(), n=n)
        logger.meters['acc5'].update(top5.item(), n=n)
    # Merge the per-process statistics before reporting.
    logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=logger.acc1, top5=logger.acc5, losses=logger.loss))
    return {name: meter.global_avg for name, meter in logger.meters.items()}
| 5,354 | 39.263158 | 98 | py |
Diverse-ViT | Diverse-ViT-main/hubconf.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from models import *
dependencies = ["torch", "torchvision", "timm"]
| 138 | 22.166667 | 47 | py |
Diverse-ViT | Diverse-ViT-main/gradinit_optimizers.py | import torch
import math
import pdb
class RescaleAdam(torch.optim.Optimizer):
    r"""Adam-style optimizer over per-tensor *scale factors* (GradInit-style).

    Instead of updating the weights directly, each parameter tensor keeps its
    direction and only its norm is rescaled: a scalar ``alpha`` per tensor is
    updated with Adam statistics of ``d(loss)/d(alpha)``, and the tensor is
    renormalized to ``alpha * init_norm`` after every step.

    Based on `Adam: A Method for Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        min_scale (float, optional): lower bound for each scale factor (default: 0)
        grad_clip (float, optional): clip the scalar scale gradient to
            ``[-grad_clip, grad_clip]``; 0 disables clipping (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False; NOTE the AMSGrad branch is currently commented out)
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 min_scale=0, grad_clip=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, amsgrad=amsgrad, min_scale=min_scale, grad_clip=grad_clip)
        super(RescaleAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Keep unpickled optimizers compatible with older checkpoints that
        # predate the 'amsgrad' group option.
        super(RescaleAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    @torch.no_grad()
    def step(self, closure=None, is_constraint=False):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            is_constraint (bool, optional): when True, use the separate
                constraint-loss first-moment statistics ('cons_*' state) while
                sharing the second moment with the main objective.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        grad_list = []
        alphas = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # State initialization
                amsgrad = group['amsgrad']
                state = self.state[p]
                if len(state) == 0:
                    state['alpha'] = 1.                 # current scale factor
                    state['init_norm'] = p.norm().item()  # reference norm at t=0
                    state['step'] = 0
                    state['cons_step'] = 0
                    # Exponential moving average of gradient values for the weight norms
                    state['exp_avg'] = 0
                    # Exponential moving average of squared gradient values for the weight norms
                    state['exp_avg_sq'] = 0
                    state['cons_exp_avg'] = 0
                    # state['cons_exp_avg_sq'] = 0
                    # if amsgrad:
                    #     # Maintains max of all exp. moving avg. of sq. grad. values
                    #     state['max_exp_avg_sq'] = 0
                # alphas.append(state['alpha'])
                curr_norm = p.data.norm().item()
                if state['init_norm'] == 0 or curr_norm == 0:
                    # pdb.set_trace()
                    continue # typical for biases
                # Chain rule: d(loss)/d(alpha) = <grad, p> * init_norm / curr_norm
                # (the scalar gradient of the loss wrt the tensor's scale).
                grad = torch.sum(p.grad * p.data).item() * state['init_norm'] / curr_norm
                # grad_list.append(grad)
                if group['grad_clip'] > 0:
                    grad = max(min(grad, group['grad_clip']), -group['grad_clip'])
                # Perform stepweight decay
                # if group['weight_decay'] > 0:
                #     p.mul_(1 - group['lr'] * group['weight_decay'])
                beta1, beta2 = group['betas']
                if is_constraint:
                    # Constraint loss keeps its own first moment / step count.
                    state['cons_step'] += 1
                    state['cons_exp_avg'] = state['cons_exp_avg'] * beta1 + grad * (1 - beta1)
                    # state['cons_exp_avg_sq'] = state['cons_exp_avg_sq'] * beta2 + (grad * grad) * (1 - beta2)
                    steps = state['cons_step']
                    exp_avg = state['cons_exp_avg']
                    # exp_avg_sq = state['cons_exp_avg_sq']
                else:
                    # pdb.set_trace()
                    state['step'] += 1
                    state['exp_avg'] = state['exp_avg'] * beta1 + grad * (1 - beta1)
                    steps = state['step']
                    exp_avg = state['exp_avg']
                # Second moment is shared between the main and constraint
                # objectives; its bias correction uses the combined step count.
                state['exp_avg_sq'] = state['exp_avg_sq'] * beta2 + (grad * grad) * (1 - beta2)
                exp_avg_sq = state['exp_avg_sq']
                bias_correction1 = 1 - beta1 ** steps
                bias_correction2 = 1 - beta2 ** (state['cons_step'] + state['step'])
                # Decay the first and second moment running average coefficient
                # if amsgrad:
                #     # Maintains the maximum of all 2nd moment running avg. till now
                #     state['max_exp_avg_sq'] = max(state['max_exp_avg_sq'], state['exp_avg_sq'])
                #     # Use the max. for normalizing running avg. of gradient
                #     denom = math.sqrt(state['max_exp_avg_sq'] / bias_correction2) + group['eps']
                # else:
                denom = math.sqrt(exp_avg_sq / bias_correction2) + group['eps']
                step_size = group['lr'] / bias_correction1
                # update the parameter: Adam step on alpha (floored at
                # min_scale), then renormalize p to alpha * init_norm.
                state['alpha'] = max(state['alpha'] - step_size * exp_avg / denom, group['min_scale'])
                p.data.mul_(state['alpha'] * state['init_norm'] / curr_norm)
        # print(alphas)
        # print(grad_list)
        # print(max(grad_list), min(grad_list), max(alphas), min(alphas))
        # pdb.set_trace()
        return loss
    def reset_momentums(self):
        # Zero all Adam moment statistics (and initialize state lazily), e.g.
        # before reusing the optimizer for a fresh (re)initialization pass.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                amsgrad = group['amsgrad']
                if len(state) == 0:
                    state['alpha'] = 1.
                    state['init_norm'] = p.norm().item()
                    state['step'] = 0
                    # Exponential moving average of gradient values for the weight norms
                    state['exp_avg'] = 0
                    # Exponential moving average of squared gradient values for the weight norms
                    state['exp_avg_sq'] = 0
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = 0
                else:
                    state['step'] = 0
                    # Exponential moving average of gradient values for the weight norms
                    state['exp_avg'] = 0
                    # Exponential moving average of squared gradient values for the weight norms
                    state['exp_avg_sq'] = 0
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = 0
Diverse-ViT | Diverse-ViT-main/run_with_submitit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as classification
import submitit
def parse_args():
    """Build the submitit launcher CLI on top of the classification parser."""
    base_parser = classification.get_args_parser()
    parser = argparse.ArgumentParser("Submitit for DeiT", parents=[base_parser])
    # Cluster-shape options.
    parser.add_argument("--ngpus", default=8, type=int,
                        help="Number of gpus to request on each node")
    parser.add_argument("--nodes", default=2, type=int,
                        help="Number of nodes to request")
    parser.add_argument("--timeout", default=2800, type=int,
                        help="Duration of the job")
    # Scheduling / placement options.
    parser.add_argument("--job_dir", default="", type=str,
                        help="Job dir. Leave empty for automatic.")
    parser.add_argument("--partition", default="learnfair", type=str,
                        help="Partition where to submit")
    parser.add_argument("--use_volta32", action='store_true',
                        help="Big models? Use this")
    parser.add_argument('--comment', default="", type=str,
                        help='Comment to pass to scheduler, e.g. priority message')
    return parser.parse_args()
def get_shared_folder() -> Path:
    """Return (creating it if needed) the per-user shared experiments folder.

    Raises RuntimeError when the cluster checkpoint mount is not present.
    """
    if not Path("/checkpoint/").is_dir():
        raise RuntimeError("No shared folder available")
    shared = Path("/checkpoint/{}/experiments".format(os.getenv("USER")))
    shared.mkdir(exist_ok=True)
    return shared
def get_init_file():
    """Pick a fresh distributed-rendezvous file path in the shared folder.

    The file must not exist yet, but its parent directory must.
    """
    shared = get_shared_folder()
    os.makedirs(str(shared), exist_ok=True)
    candidate = shared / "{}_init".format(uuid.uuid4().hex)
    if candidate.exists():
        os.remove(str(candidate))
    return candidate
class Trainer(object):
    """Picklable job object submitted to submitit.

    Calling the instance runs ``classification.main`` with this job's args;
    submitit invokes ``checkpoint()`` on preemption/timeout to requeue the job,
    resuming from ``checkpoint.pth`` when one exists.
    """
    def __init__(self, args):
        self.args = args
    def __call__(self):
        import main as classification
        self._setup_gpu_args()
        classification.main(self.args)
    def checkpoint(self):
        import os
        import submitit
        # New rendezvous file for the restarted job; resume from the last
        # checkpoint if training already wrote one.
        self.args.dist_url = get_init_file().as_uri()
        checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
        if os.path.exists(checkpoint_file):
            self.args.resume = checkpoint_file
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)
    def _setup_gpu_args(self):
        import submitit
        from pathlib import Path
        # Fill in the distributed-launch fields from the SLURM job environment
        # (and substitute the %j placeholder with the real job id).
        job_env = submitit.JobEnvironment()
        self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
        self.args.gpu = job_env.local_rank
        self.args.rank = job_env.global_rank
        self.args.world_size = job_env.num_tasks
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
    """Parse args, configure a submitit SLURM executor, and submit training."""
    args = parse_args()
    if args.job_dir == "":
        args.job_dir = get_shared_folder() / "%j"
    # Note that the folder will depend on the job_id, to easily track experiments
    executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
    num_gpus_per_node = args.ngpus
    nodes = args.nodes
    timeout_min = args.timeout
    partition = args.partition
    kwargs = {}
    if args.use_volta32:
        # Request the 32GB Volta GPUs for large models.
        kwargs['slurm_constraint'] = 'volta32gb'
    if args.comment:
        kwargs['slurm_comment'] = args.comment
    executor.update_parameters(
        mem_gb=40 * num_gpus_per_node,
        gpus_per_node=num_gpus_per_node,
        tasks_per_node=num_gpus_per_node,  # one task per GPU
        cpus_per_task=10,
        nodes=nodes,
        timeout_min=timeout_min,  # max is 60 * 72
        # Below are cluster dependent parameters
        slurm_partition=partition,
        slurm_signal_delay_s=120,
        **kwargs
    )
    executor.update_parameters(name="deit")
    # Rendezvous file for torch.distributed and job-id-based output dir.
    args.dist_url = get_init_file().as_uri()
    args.output_dir = args.job_dir
    trainer = Trainer(args)
    job = executor.submit(trainer)
    print("Submitted job_id:", job.job_id)
# Script entry point: parse args and submit the training job to SLURM.
if __name__ == "__main__":
    main()
| 4,075 | 31.094488 | 103 | py |
Diverse-ViT | Diverse-ViT-main/utils.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.

    The sliding window (``deque``) backs ``median``/``avg``/``max``/``value``;
    ``count``/``total`` back the exact ``global_avg``.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # Default string rendering: windowed median plus global average.
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        # The window stores the raw value once; global stats weight it by n.
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        # Median of the current window.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        # Mean of the current window.
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        # Exact average over every value ever recorded.
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        # Most recently recorded value.
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
class MetricLogger(object):
    """Collect named SmoothedValue meters and periodically print progress.

    Meters are created lazily on first update; ``log_every`` wraps an iterable
    and emits a formatted status line (ETA, meters, timing, GPU memory) every
    ``print_freq`` iterations.
    """
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        # Each keyword becomes/updates a meter; tensors are unwrapped first.
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. logger.acc1); only consulted when
        # normal attribute lookup fails.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # Reduce every meter's global stats across distributed workers.
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # Register a meter with a custom format (e.g. the lr meter).
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield from `iterable`, printing a status line every `print_freq`
        iterations and a total-time summary at the end."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        # Per-iteration timing: data loading vs. full step.
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                # ETA from the running global average iteration time.
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
    """
    Workaround for ModelEma._load_checkpoint to accept an already-loaded object
    """
    # Serialize the in-memory checkpoint into a file-like buffer, since
    # ModelEma._load_checkpoint only accepts file objects/paths.
    with io.BytesIO() as buffer:
        torch.save(checkpoint, buffer)
        buffer.seek(0)
        model_ema._load_checkpoint(buffer)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process

    Replaces the builtin ``print`` with a gated version: output is emitted
    only on the master process, unless the caller passes ``force=True``.
    """
    import builtins as __builtin__
    original_print = __builtin__.print

    def print(*args, **kwargs):
        # Pop 'force' so it never reaches the real print.
        forced = kwargs.pop('force', False)
        if forced or is_master:
            original_print(*args, **kwargs)

    __builtin__.print = print
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is available AND a process
    group has been initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process, or 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True on rank 0 (the process that should write files / logs)."""
    rank = get_rank()
    return rank == 0
def save_on_master(*args, **kwargs):
    """torch.save wrapper that only writes from the main process."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from the launcher environment.

    Supports torchrun/torch.distributed.launch (RANK/WORLD_SIZE/LOCAL_RANK)
    and SLURM (SLURM_PROCID). Falls back to single-process mode when neither
    is set. Mutates `args` in place (rank, world_size, gpu, distributed,
    dist_backend) and silences printing on non-master ranks.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch environment.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM: derive the local GPU from the global proc id.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Make sure every rank has joined before continuing, then mute
    # printing on all ranks except 0.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
| 7,067 | 28.573222 | 94 | py |
Diverse-ViT | Diverse-ViT-main/vision_transformer_diverse.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020 Ross Wightman
"""
'''
gumbel softmax on the top layer
'''
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
    """Default pretrained-model config dict; keyword args override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Dropout ->
    Linear -> Dropout. Hidden/output widths default to the input width."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        # Module creation order matches the reference impl (same RNG stream).
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention that also returns the attention map.

    The returned map is the post-softmax, pre-dropout affinity tensor of
    shape (B, heads, N, N), used downstream by the diversity regularizers.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Default scaling is 1/sqrt(head_dim), overridable via qk_scale.
        self.scale = qk_scale or (dim // num_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # Fused projection, split into per-head q/k/v: each (B, heads, N, head_dim).
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = scores.softmax(dim=-1)  # (B, Heads, N, N)
        weights = self.attn_drop(attn_map)
        out = (weights @ v).transpose(1, 2).reshape(B, N, C)
        out = self.proj_drop(self.proj(out))
        return out, attn_map
class Block(nn.Module):
    """Pre-norm transformer encoder block (attention + MLP, both residual)
    that also surfaces the attention affinity map from its attention layer."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on the residual branches; identity when disabled.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x):
        attn_out, affinity_attn = self.attn(self.norm1(x))
        x = x + self.drop_path(attn_out)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x, affinity_attn
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Splits an image into non-overlapping patches via a strided convolution
    and flattens them to a (B, num_patches, embed_dim) token sequence.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        # Patchify + project in one op: kernel == stride == patch size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        return self.proj(x).flatten(2).transpose(1, 2)
class HybridEmbed(nn.Module):
    """ CNN Feature Map Embedding
    Extract feature map from CNN, flatten, project to embedding dim.
    """
    def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        self.img_size = img_size
        self.backbone = backbone
        if feature_size is None:
            with torch.no_grad():
                # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
                # map for all networks, the feature metadata has reliable channel and stride info, but using
                # stride to calc feature dim requires info about padding of each stage that isn't captured.
                training = backbone.training
                if training:
                    backbone.eval()
                # Probe with a zero image to discover the feature-map size,
                # then restore the backbone's original train/eval mode.
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
                feature_size = o.shape[-2:]
                feature_dim = o.shape[1]
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            # Assumes backbone exposes timm-style feature_info metadata.
            feature_dim = self.backbone.feature_info.channels()[-1]
        self.num_patches = feature_size[0] * feature_size[1]
        self.proj = nn.Linear(feature_dim, embed_dim)
    def forward(self, x):
        # Take the last feature map, flatten spatial dims into tokens, then
        # project channels to the embedding dimension.
        x = self.backbone(x)[-1]
        x = x.flatten(2).transpose(1, 2)
        x = self.proj(x)
        return x
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877

    Diverse-ViT variant: in training mode, forward additionally returns the
    first-layer embeddings, last-layer embeddings, and the first block's
    attention map, which the diversity regularizers consume. Distillation
    tokens are asserted off in this variant.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init=''):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
                (NOT supported in this variant — asserted False below)
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
        """
        super().__init__()
        if distilled:
            # Distillation tokens are unsupported in the diverse variant.
            assert False
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU
        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Bookkeeping placeholders; not used elsewhere in this file chunk.
        self.features = []
        self.token_grad = None
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Representation layer
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()
        # Classifier head(s)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
        # Weight init
        assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if weight_init.startswith('jax'):
            # leave cls token as zeros to match jax impl
            for n, m in self.named_modules():
                _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)
    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer factory.
        return {'pos_embed', 'cls_token', 'dist_token'}
    def get_classifier(self):
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist
    def reset_classifier(self, num_classes, global_pool=''):
        # Swap the classification head(s) for a new number of classes.
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
    def forward(self, x):
        # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
        # with slight modifications to add the dist_token
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        # x1: token embeddings right after position embedding (first layer).
        x1 = self.pos_drop(x + self.pos_embed)
        xtemp = x1
        for ii, blk in enumerate(self.blocks):
            xtemp, affinity_attn = blk(xtemp)
            if ii == 0:
                # Keep the first block's attention map for the regularizers.
                attn1 = affinity_attn
        # xl: last-layer token embeddings (pre-norm).
        xl = xtemp
        xo = self.norm(xl)
        xo = self.head(xo)
        if self.training:
            # Training: per-token logits plus the tensors the diversity
            # losses need (first/last embeddings, first-block attention).
            return xo, x1, xl, attn1
        else:
            # Inference: class-token logits only.
            return xo[:,0]
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
* When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
"""
if isinstance(m, nn.Linear):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.constant_(m.bias, head_bias)
elif n.startswith('pre_logits'):
lecun_normal_(m.weight)
nn.init.zeros_(m.bias)
else:
if jax_impl:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
if 'mlp' in n:
nn.init.normal_(m.bias, std=1e-6)
else:
nn.init.zeros_(m.bias)
else:
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif jax_impl and isinstance(m, nn.Conv2d):
# NOTE conv was left to pytorch default in my original init
lecun_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def resize_pos_embed(posemb, posemb_new, num_tokens=1):
    """Rescale a grid of position embeddings loaded from a state_dict.

    Adapted from the JAX checkpoint loader:
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    """
    _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        # Split off the class/dist tokens; only the grid part is resized.
        posemb_tok = posemb[:, :num_tokens]
        posemb_grid = posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok = posemb[:, :0]
        posemb_grid = posemb[0]
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    _logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
    grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(gs_new, gs_new), mode='bilinear')
    grid = grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    return torch.cat([posemb_tok, grid], dim=1)
def checkpoint_filter_fn(state_dict, model):
    """Adapt legacy checkpoints to the current model layout.

    Reshapes old manual-patchify projection weights into conv form and
    resizes the position embedding when checkpoint and model disagree.
    """
    if 'model' in state_dict:
        # DeiT checkpoints nest the weights under a 'model' key.
        state_dict = state_dict['model']
    out_dict = {}
    for key, value in state_dict.items():
        if 'patch_embed.proj.weight' in key and len(value.shape) < 4:
            # Models trained before conv-based patchification stored a 2-D
            # projection; fold it back into conv kernel shape.
            O, I, H, W = model.patch_embed.proj.weight.shape
            value = value.reshape(O, -1, H, W)
        elif key == 'pos_embed' and value.shape != model.pos_embed.shape:
            value = resize_pos_embed(value, model.pos_embed, getattr(model, 'num_tokens', 1))
        out_dict[key] = value
    return out_dict
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Build a (possibly pretrained) VisionTransformer for a registry variant.

    Pops ``num_classes`` / ``img_size`` / ``representation_size`` out of
    ``kwargs`` so the timm config machinery and the constructor each see
    only what they expect.
    """
    if default_cfg is None:
        default_cfg = deepcopy(default_cfgs[variant])
    overlay_external_default_cfg(default_cfg, kwargs)
    default_num_classes = default_cfg['num_classes']
    default_img_size = default_cfg['input_size'][-2:]

    num_classes = kwargs.pop('num_classes', default_num_classes)
    img_size = kwargs.pop('img_size', default_img_size)
    repr_size = kwargs.pop('representation_size', None)
    if repr_size is not None and num_classes != default_num_classes:
        # Drop the representation layer when fine-tuning to a different head
        # size. Not always desirable, but safer than keeping it silently.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    return build_model_with_cfg(
        VisionTransformer, variant, pretrained,
        default_cfg=default_cfg,
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
| 17,793 | 40.574766 | 132 | py |
Diverse-ViT | Diverse-ViT-main/layers.py | import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
class Linear(nn.Linear):
    """``nn.Linear`` with a persistent binary ``weight_mask`` buffer.

    The mask is elementwise-multiplied into the weight on every forward
    pass, letting pruning code zero out connections without touching the
    underlying parameter.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__(in_features, out_features, bias)
        self.register_buffer('weight_mask', torch.ones(self.weight.shape))

    def forward(self, input):
        masked_weight = self.weight_mask * self.weight
        return F.linear(input, masked_weight, self.bias)
class Conv2d(nn.Conv2d):
    """``nn.Conv2d`` with a persistent binary ``weight_mask`` buffer.

    The mask is applied to the kernel on every forward pass so pruning code
    can zero out filters without modifying the parameter itself.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding,
            dilation, groups, bias, padding_mode)
        self.register_buffer('weight_mask', torch.ones(self.weight.shape))

    def _conv_forward(self, input, weight, bias):
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, bias, self.stride,
                            self.padding, self.dilation, self.groups)
        # Non-'zeros' modes: pad explicitly, then run an unpadded conv.
        # NOTE(review): relies on the private ``_padding_repeated_twice``
        # attribute of older torch (renamed in newer releases) — verify
        # against the pinned torch version.
        padded = F.pad(input, self._padding_repeated_twice, mode=self.padding_mode)
        return F.conv2d(padded, weight, bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input):
        return self._conv_forward(input, self.weight_mask * self.weight, self.bias)
| 1,575 | 36.52381 | 95 | py |
Diverse-ViT | Diverse-ViT-main/datasets.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
    """iNaturalist (2018/2019 layout) dataset exposed as an ImageFolder.

    Builds ``self.samples`` from the split's annotation JSON instead of
    scanning directories. Labels are remapped to contiguous ids keyed on the
    chosen taxonomic ``category`` so train and val share one mapping.
    """

    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        # Annotations for the requested split (e.g. train2018.json / val2018.json).
        path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
        with open(path_json) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        # The label mapping is always derived from the *train* split so that
        # train and val agree on class indices.
        path_json_for_targeter = os.path.join(root, f"train{year}.json")
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)
        # targeter: category value (e.g. species name) -> contiguous class id,
        # assigned in first-seen order over the train annotations.
        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if king[0] not in targeter.keys():
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)
        # Build (path, label) pairs for this split.
        # NOTE(review): assumes file_name splits as split/.../category_id/filename —
        # verify against the on-disk iNat layout.
        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))

    # __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
    """Create the dataset for one split plus its class count.

    Args:
        is_train: True for the training split, False for validation.
        args: namespace with ``data_set``, ``data_path`` and (for INAT*)
            ``inat_category``; also everything ``build_transform`` reads.

    Returns:
        ``(dataset, nb_classes)`` tuple.

    Raises:
        ValueError: if ``args.data_set`` is not a supported name (previously
            an unknown value fell through and crashed with a NameError).
    """
    transform = build_transform(is_train, args)

    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        # Prefer the alternate 'train_new' folder when it exists on disk.
        root = os.path.join(args.data_path, 'train_new' if is_train else 'val')
        if not os.path.exists(root):
            root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    else:
        raise ValueError(f'Unsupported data_set: {args.data_set!r}')

    return dataset, nb_classes
def build_transform(is_train, args):
    """Build train/eval transforms following the DeiT recipe.

    Training uses timm's ``create_transform`` (auto-augment, random erasing,
    colour jitter); evaluation resizes to 256/224 of the input size, centre
    crops and normalises with ImageNet statistics.
    """
    resize_im = args.input_size > 32
    if is_train:
        # Always dispatches to transforms_imagenet_train internally.
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # Small images (e.g. CIFAR): padded random crop instead of
            # RandomResizedCropAndInterpolation.
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform

    eval_ops = []
    if resize_im:
        # Resize keeps the same crop ratio used for 224-sized images.
        size = int((256 / 224) * args.input_size)
        eval_ops.append(transforms.Resize(size, interpolation=3))
    eval_ops.append(transforms.CenterCrop(args.input_size))
    eval_ops.append(transforms.ToTensor())
    eval_ops.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(eval_ops)
| 4,235 | 36.821429 | 105 | py |
Diverse-ViT | Diverse-ViT-main/reg.py | import torch
import numpy as np
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
__all__ = ['Loss_mixing', 'Loss_cosine', 'Loss_contrastive',
'Loss_cosine_attn', 'Loss_condition_orth_weight']
# Embedding Level Size: (Batch-size, Tokens, Dims * Heads)
# Attention Level Size: (Batch-size, Heads, Tokens, Tokens) -> (Batch-size, Heads, Tokens * Tokens)
# Similarity Regularization, input: (Batch-size, Diverse-Target, Dimension)
################# Main Regularization ###############
def Loss_mixing(output, patch_target):
    """Patch-level soft-target cross entropy, summed over the patch tokens.

    Args:
        output: (B, 1 + P, C) per-token logits; token 0 (class token) is
            skipped.
        patch_target: per-patch soft targets, indexed as
            ``patch_target[:, i-1]`` — presumably (B, P, C); confirm with
            the caller.

    Returns:
        (summed loss over the P patch tokens, token count 1 + P).
    """
    criterion = SoftTargetCrossEntropy()
    patch_num = output.shape[1]
    total = 0
    for idx in range(1, patch_num):
        total += criterion(output[:, idx], patch_target[:, idx - 1])
    return total, patch_num
def Loss_cosine(h_emb, eps=1e-8):
    """Mean pairwise cosine similarity between patch tokens.

    The class token (index 0) is excluded; trailing dims are flattened and
    rows unit-normalised with an ``eps`` floor on the norm. The mean runs
    over all B * (T-1)^2 pairs, diagonal included.
    """
    patches = h_emb[:, 1:]
    b, t = patches.shape[0], patches.shape[1]
    flat = patches.reshape(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    gram = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    return gram.mean()
def Loss_contrastive(h1_emb, hl_emb, eps=1e-8):
    """InfoNCE-style loss aligning early (h1) and late (hl) patch tokens.

    Both inputs are (B, T, ...) with the class token at index 0, which is
    dropped. Positive pairs are same-index tokens across the two layers;
    the averaged remaining similarities act as the negative term.

    Returns a scalar tensor (mean NCE over batch and tokens).
    """
    early = h1_emb[:, 1:]
    late = hl_emb[:, 1:]
    b, t = early.shape[0], early.shape[1]

    early = early.reshape(b, t, -1)
    e_norm = early.norm(dim=2).unsqueeze(2)
    early = early / torch.max(e_norm, eps * torch.ones_like(e_norm))

    late = late.reshape(b, t, -1)
    l_norm = late.norm(dim=2).unsqueeze(2)
    late = late / torch.max(l_norm, eps * torch.ones_like(l_norm))

    sim = torch.einsum('abc,adc->abd', early, late)
    pos = torch.diagonal(sim, dim1=1, dim2=2)
    n_tok = pos.shape[1]
    exp_pos = torch.exp(pos)
    neg = torch.exp((torch.sum(sim, dim=2) - pos) / (n_tok - 1))
    nce = -torch.log(exp_pos / (exp_pos + neg))
    return nce.mean()
def Loss_cosine_attn(h_emb, eps=1e-8):
    """Mean pairwise cosine similarity over ALL slices along dim 1.

    Same computation as ``Loss_cosine`` but without dropping the first
    token — intended for attention-style inputs (B, H, ...).
    """
    b, h = h_emb.shape[0], h_emb.shape[1]
    flat = h_emb.reshape(b, h, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    gram = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    # The mean includes the diagonal (self-similarity) terms.
    return gram.mean()
def dominant_eigenvalue(A, dev):
    """Estimate the dominant eigenvalue of a square matrix ``A`` (N, N).

    One power-iteration step from a random probe followed by a Rayleigh
    quotient; exact for (scaled) identities, approximate otherwise. ``dev``
    is the torch device for the probe vector.
    """
    size = A.size(0)
    probe = torch.rand(size, 1, device=dev)
    a_probe = A @ probe
    aa_probe = A @ a_probe
    return aa_probe.permute(1, 0) @ a_probe / (a_probe.permute(1, 0) @ a_probe)
def get_singular_values(A, dev):
    """Approximate the extreme eigenvalues of ``A^T A``.

    Power iteration finds the largest; shifting the spectrum by it exposes
    the smallest. Returns ``(smallest, largest)`` as 1x1 tensors (i.e.
    squared singular values of ``A``).
    """
    ata = A.permute(1, 0) @ A
    size = ata.size(0)
    largest = dominant_eigenvalue(ata, dev)
    shifted = ata - torch.eye(size, device=dev) * largest
    smallest = dominant_eigenvalue(shifted, dev) + largest
    return smallest, largest
def Loss_condition_orth_weight(W):
    """Condition-number penalty on a weight matrix.

    Transposes ``W`` to (in, out) and penalises the squared gap between the
    largest and smallest approximate eigenvalues of ``W W^T``; zero when the
    matrix is perfectly conditioned.
    """
    w_t = W.permute(1, 0)  # (in_features, out_features)
    smallest, largest = get_singular_values(w_t, w_t.device)
    return torch.mean((largest - smallest) ** 2)
################# Additional Regularization ###############
def loss_cosine_reg(h_emb, eps=1e-8):
    """Mean |cosine similarity| between patch tokens (class token excluded).

    Absolute-value variant of ``Loss_cosine``: anti-correlated tokens are
    penalised as strongly as correlated ones.
    """
    patches = h_emb[:, 1:]
    b, t = patches.shape[0], patches.shape[1]
    flat = patches.reshape(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    gram = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    # Diagonal (self-similarity) terms are included in the mean.
    return gram.abs().mean()
def loss_cosine_attn_reg(h_emb, eps=1e-8):
    """Mean |cosine similarity| over ALL slices along dim 1 (no token skip).

    Attention-map variant of ``loss_cosine_reg``; input (B, H, ...) with
    trailing dims flattened per head.
    """
    b, h = h_emb.shape[0], h_emb.shape[1]
    flat = h_emb.reshape(b, h, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    gram = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    # Diagonal (self-similarity) terms are included in the mean.
    return gram.abs().mean()
def loss_cosine_across_reg(h_emb, h_emb2, eps=1e-8):
    """Mean |cosine similarity| between patch tokens of two layers.

    Both inputs are (B, T, ...); the class token (index 0) is dropped and
    similarities are taken between every token of ``h_emb`` and every token
    of ``h_emb2``.
    """
    first = h_emb[:, 1:]
    second = h_emb2[:, 1:]
    b, t = first.shape[0], first.shape[1]

    first = first.reshape(b, t, -1)
    n1 = first.norm(dim=2).unsqueeze(2)
    first = first / torch.max(n1, eps * torch.ones_like(n1))

    second = second.reshape(b, t, -1)
    n2 = second.norm(dim=2).unsqueeze(2)
    second = second / torch.max(n2, eps * torch.ones_like(n2))

    sim = torch.einsum('abc,acd->abd', first, second.transpose(1, 2))
    return sim.abs().mean()
def loss_cosine_across_attn_reg(h_emb, h_emb2, eps=1e-8):
    """Mean |cosine similarity| between two layers' attention-style inputs.

    Like ``loss_cosine_across_reg`` but every slice along dim 1
    participates (no class-token skip).
    """
    first = h_emb
    second = h_emb2
    b, h = first.shape[0], first.shape[1]

    first = first.reshape(b, h, -1)
    n1 = first.norm(dim=2).unsqueeze(2)
    first = first / torch.max(n1, eps * torch.ones_like(n1))

    second = second.reshape(b, h, -1)
    n2 = second.norm(dim=2).unsqueeze(2)
    second = second / torch.max(n2, eps * torch.ones_like(n2))

    sim = torch.einsum('abc,acd->abd', first, second.transpose(1, 2))
    return sim.abs().mean()
def loss_cosine_across_reg_noabs(h_emb, h_emb2, eps=1e-8):
    """Signed variant of ``loss_cosine_across_reg``: mean cosine similarity
    (no absolute value) between the two layers' patch tokens."""
    first = h_emb[:, 1:]
    second = h_emb2[:, 1:]
    b, t = first.shape[0], first.shape[1]

    first = first.reshape(b, t, -1)
    n1 = first.norm(dim=2).unsqueeze(2)
    first = first / torch.max(n1, eps * torch.ones_like(n1))

    second = second.reshape(b, t, -1)
    n2 = second.norm(dim=2).unsqueeze(2)
    second = second / torch.max(n2, eps * torch.ones_like(n2))

    sim = torch.einsum('abc,acd->abd', first, second.transpose(1, 2))
    return sim.mean()
def loss_cosine_across_attn_reg_noabs(h_emb, h_emb2, eps=1e-8):
    """Signed variant of ``loss_cosine_across_attn_reg``: mean cosine
    similarity (no absolute value), all slices along dim 1 included."""
    first = h_emb
    second = h_emb2
    b, h = first.shape[0], first.shape[1]

    first = first.reshape(b, h, -1)
    n1 = first.norm(dim=2).unsqueeze(2)
    first = first / torch.max(n1, eps * torch.ones_like(n1))

    second = second.reshape(b, h, -1)
    n2 = second.norm(dim=2).unsqueeze(2)
    second = second / torch.max(n2, eps * torch.ones_like(n2))

    sim = torch.einsum('abc,acd->abd', first, second.transpose(1, 2))
    return sim.mean()
def loss_contrastive_attn_reg(h1_emb_target, hl_emb_target, eps=1e-8):
    """InfoNCE-style loss between two layers, over ALL slices along dim 1.

    Same computation as ``Loss_contrastive`` but without dropping the first
    token — intended for attention-style inputs.
    """
    b, t = h1_emb_target.shape[0], h1_emb_target.shape[1]

    early = h1_emb_target.reshape(b, t, -1)
    e_norm = early.norm(dim=2).unsqueeze(2)
    early = early / torch.max(e_norm, eps * torch.ones_like(e_norm))

    late = hl_emb_target.reshape(b, t, -1)
    l_norm = late.norm(dim=2).unsqueeze(2)
    late = late / torch.max(l_norm, eps * torch.ones_like(l_norm))

    sim = torch.einsum('abc,adc->abd', early, late)
    pos = torch.diagonal(sim, dim1=1, dim2=2)
    n_tok = pos.shape[1]
    exp_pos = torch.exp(pos)
    neg = torch.exp((torch.sum(sim, dim=2) - pos) / (n_tok - 1))
    nce = -torch.log(exp_pos / (exp_pos + neg))
    return nce.mean()
# Uniformity Regularization, weight: (Diverse-Target, Dimention) Embedding: (Batch-size, Diverse-Target, Dimension)
def norm(filt):
    """Column-normalise ``filt`` (dim, out_dim) to unit L2 norm per column
    (with a 1e-8 stabiliser inside the square root)."""
    col_norm = ((filt * filt).sum(dim=0) + 1e-8).sqrt().reshape(1, filt.shape[1])
    return filt / col_norm
def cal(filt):
    """Pairwise cosine similarity between the columns of ``filt``
    (dim, out_dim) -> (out_dim, out_dim)."""
    col_norm = ((filt * filt).sum(dim=0) + 1e-8).sqrt().reshape(1, filt.shape[1])
    norm_mat = torch.matmul(col_norm.transpose(1, 0), col_norm)
    inner = torch.matmul(filt.transpose(1, 0), filt)
    return inner / norm_mat
def loss_mhs_weight_reg(filt):
    """Maximum-hyperspherical-separation penalty on weight filters.

    Normalises the (transposed) filters, computes pairwise squared chordal
    distances ``2 - 2 cos``, and returns minus the smallest strictly-lower-
    triangular distance — minimising this pushes the closest pair apart.
    """
    cols = norm(filt.transpose(1, 0))  # (in, out), unit columns
    dist = 2.0 - 2.0 * cal(cols)
    dist -= torch.triu(dist)  # keep the strictly-lower triangle
    nonzero = torch.where(dist != 0)
    closest = torch.min(dist[nonzero])
    mask = dist.eq(closest)
    return -(dist * mask.detach()).sum()
def norm_feature(filt):
    """Row-normalise a batched matrix (B, out_dim, in_dim) to unit L2 per row
    (with a 1e-8 stabiliser inside the square root)."""
    b, o = filt.shape[0], filt.shape[1]
    row_norm = ((filt * filt).sum(dim=2) + 1e-8).sqrt().reshape(b, o, 1)
    return filt / row_norm
def cal_feature(filt):
    """Batched pairwise cosine similarity between rows:
    (B, out_dim, in_dim) -> (B, out_dim, out_dim)."""
    b, o = filt.shape[0], filt.shape[1]
    row_norm = ((filt * filt).sum(dim=2) + 1e-8).sqrt().reshape(b, o, 1)
    norm_mat = torch.einsum('bac,bdc->bad', row_norm, row_norm)
    inner = torch.einsum('bac,bdc->bad', filt, filt)
    return inner / norm_mat
def loss_mhs_feature_reg(filt):
    """Per-sample MHS penalty on feature rows, averaged over the batch.

    For each sample, finds the closest pair of normalised rows by squared
    chordal distance and accumulates that minimum; returns minus the batch
    mean so minimising the loss separates the closest pair.
    """
    batch_size = filt.shape[0]
    rows = norm_feature(filt.reshape(batch_size, filt.shape[1], -1))
    dist = 2.0 - 2.0 * cal_feature(rows)
    dist -= torch.triu(dist)  # strictly-lower triangle per sample
    total = 0
    for b in range(batch_size):
        nz = torch.where(dist[b, :, :] != 0)
        if nz[0].shape[0] > 0:
            closest = torch.min(dist[b, :, :][nz])
            mask = dist[b, :, :].eq(closest)
            total += (dist[b, :, :] * mask.detach()).sum()
    return -total / batch_size
def loss_mgd_weight_reg(filt):
    """Minimum-global-distance (determinant) diversity penalty on weights.

    Builds an RBF-style similarity matrix from pairwise chordal distances of
    the normalised columns and penalises a small log-determinant (similar
    filters). A tiny diagonal offset keeps the matrix invertible.
    """
    n_filt = filt.shape[0]
    cols = norm(filt.transpose(1, 0))  # (in, out)
    dist = 2.0 - 2.0 * cal(cols)
    kernel = torch.exp(-1 * dist) + torch.diag(1e-6 * torch.ones(n_filt).to(filt.device))
    return -torch.logdet(kernel)
def loss_mgd_feature_reg(filt):
    """Batched MGD penalty: as ``loss_mgd_weight_reg`` but per sample on
    (B, out_dim, ...) features, averaged over the batch."""
    batch_size, out_dim = filt.shape[0], filt.shape[1]
    rows = norm_feature(filt.reshape(batch_size, out_dim, -1))
    dist = 2.0 - 2.0 * cal_feature(rows)
    offset = torch.diag(1e-6 * torch.ones(out_dim).to(filt.device)).repeat(batch_size, 1, 1)
    kernel = torch.exp(-1 * dist) + offset
    return -torch.logdet(kernel).mean()
def loss_condition_orth_weight_reg_inverse(W):
    """Condition-number penalty on ``W`` without the transpose applied by
    ``Loss_condition_orth_weight``."""
    lo, hi = get_singular_values(W, W.device)
    return torch.mean((hi - lo) ** 2)
def loss_s_orth_weight_reg(A):
    """Soft-orthogonality penalty ``||A A^T - I||_F^2`` on a weight matrix."""
    gram = A @ A.permute(1, 0)
    identity = torch.eye(gram.size(0), device=A.device)
    return torch.norm(gram - identity, p='fro') ** 2
def features_dominant_eigenvalue(A):
    """Batched dominant-eigenvalue estimate for (B, N, N) matrices.

    One power-iteration step from a random probe plus a Rayleigh quotient;
    exact for scaled identities, approximate otherwise. Returns a (B,)
    tensor; a 1e-6 term guards against a zero denominator.
    """
    device = A.device
    b, n = A.size(0), A.size(1)
    probe = torch.randn(b, n, 1).to(device)
    for _ in range(1):
        probe = torch.bmm(A, probe)
    numerator = torch.bmm(
        torch.bmm(A, probe).view(b, 1, n),
        probe
    ).squeeze()
    denominator = (torch.norm(probe.view(b, n), p=2, dim=1) ** 2).squeeze()
    return numerator / (denominator + 1e-6)
def features_get_singular_values(A):
    """Batched (smallest, largest) eigenvalue estimates of ``A A^T`` for
    (B, N, M) inputs, via power iteration plus a spectral shift."""
    device = A.device
    gram = torch.bmm(A, A.permute(0, 2, 1))
    b, n = gram.size(0), gram.size(1)
    largest = features_dominant_eigenvalue(gram)
    shift = torch.eye(n).expand(b, n, n).to(device) * largest.view(b, 1, 1).repeat(1, n, n)
    smallest = features_dominant_eigenvalue(gram - shift) + largest
    return smallest, largest
def loss_condition_orth_embedding_reg(fea, eps=1e-8):
    """Condition-number penalty on token embeddings (B, T, ...).

    Rows are unit-normalised (with an ``eps`` floor) before estimating the
    extreme eigenvalues of the Gram matrix; their squared gap is penalised.
    """
    b, t = fea.size(0), fea.size(1)
    flat = fea.view(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    smallest, largest = features_get_singular_values(unit)
    return torch.mean((largest - smallest) ** 2)
def loss_condition_orth_attn_reg(fea):
    """Condition-number penalty on attention maps (B, H, T, T); heads are
    flattened but NOT normalised (cf. the embedding variant)."""
    b, h = fea.size(0), fea.size(1)
    smallest, largest = features_get_singular_values(fea.view(b, h, -1))
    return torch.mean((largest - smallest) ** 2)
def loss_s_orth_attn_reg(A):
    """Soft-orthogonality penalty across attention heads.

    Flattens each head's (T, T) map, forms the H x H Gram matrix per sample
    and penalises its squared Frobenius distance to identity, averaged over
    the batch. Heads are NOT unit-normalised first.
    """
    b, h = A.shape[0], A.shape[1]
    flat = A.view(b, h, -1)
    gram = flat @ flat.permute(0, 2, 1)
    identity = torch.eye(h, device=A.device).repeat(b, 1, 1)
    sq_err = (gram - identity) ** 2
    return sq_err.sum(dim=2).sum(dim=1).mean()
def loss_s_orth_embedding_reg(A, eps=1e-8):
    """Soft-orthogonality penalty on unit-normalised token embeddings
    (B, T, ...): squared Frobenius distance of the Gram matrix from
    identity, averaged over the batch."""
    b, t = A.shape[0], A.shape[1]
    flat = A.view(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    gram = unit @ unit.permute(0, 2, 1)
    identity = torch.eye(t, device=A.device).repeat(b, 1, 1)
    sq_err = (gram - identity) ** 2
    return sq_err.sum(dim=2).sum(dim=1).mean()
# Gradient Regularization: Only last Embedding: (Batch-size, Diverse-Target, Dimension)
def loss_grad_diversity_reg(grad_tensor, eps=1e-8):
    """Gradient-diversity penalty (negated diversity ratio).

    NaNs are replaced by ``eps``; per sample the ratio
    ``sum_i ||g_i||^2 / ||sum_i g_i||^2`` over the token axis is computed
    and minus its batch mean returned — maximising the ratio favours
    dissimilar per-token gradients.
    """
    clean = torch.where(torch.isnan(grad_tensor),
                        eps * torch.ones_like(grad_tensor), grad_tensor)
    summed = clean.sum(dim=1)
    sum_norm = (summed ** 2).sum(dim=1)
    norm_sum = (clean ** 2).sum(dim=2).sum(dim=1)
    return -(norm_sum / sum_norm).mean()
| 15,331 | 34.084668 | 115 | py |
Diverse-ViT | Diverse-ViT-main/models.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
from vision_transformer_diverse import VisionTransformer as VisionTransformerdiverse
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_patch16_224_diverse', 'deit_small_patch16_224_diverse', 'deit_base_patch16_224_diverse', 'deit_small_layer24_patch16_224_diverse',
]
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
    """DeiT-Tiny/16 (192-d, 12 layers, 3 heads) on the stock timm ViT."""
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
    """DeiT-Small/16 (384-d, 12 layers, 6 heads) on the stock timm ViT."""
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
    """DeiT-Base/16 (768-d, 12 layers, 12 heads) on the stock timm ViT."""
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_tiny_patch16_224_diverse(pretrained=False, **kwargs):
    """DeiT-Tiny/16 on the diversity-regularized ViT; optionally seeds it
    with the stock DeiT-Tiny checkpoint."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_patch16_224_diverse(pretrained=False, **kwargs):
    """DeiT-Small/16 on the diversity-regularized ViT; optionally seeds it
    with the stock DeiT-Small checkpoint."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_layer24_patch16_224_diverse(pretrained=False, **kwargs):
    """24-layer DeiT-Small/16 variant on the diversity-regularized ViT.

    NOTE(review): the pretrained URL points at the 12-layer DeiT-Small
    checkpoint — loading it into a depth-24 model will likely fail with
    strict state-dict matching; confirm intended usage.
    """
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=384, depth=24, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_patch16_224_diverse(pretrained=False, **kwargs):
    """DeiT-Base/16 on the diversity-regularized ViT; optionally seeds it
    with the stock DeiT-Base checkpoint."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
| 4,745 | 37.585366 | 146 | py |
Diverse-ViT | Diverse-ViT-main/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
    """Distributed sampler with 3x repeated augmentation.

    Every dataset index is repeated three times and the copies are dealt
    round-robin across processes, so each GPU sees a differently augmented
    instance of the same image. Heavily based on
    ``torch.utils.data.DistributedSampler``.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Each sample appears 3 times before sharding across replicas.
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        # Truncate each epoch to whole 256-sample blocks per replica.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        # Deterministic shuffle seeded by the epoch number.
        gen = torch.Generator()
        gen.manual_seed(self.epoch)
        if self.shuffle:
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))
        # Triple every index, then pad so the list divides evenly.
        tripled = [idx for idx in order for _ in range(3)]
        tripled += tripled[:(self.total_size - len(tripled))]
        assert len(tripled) == self.total_size
        # Round-robin shard for this process.
        shard = tripled[self.rank:self.total_size:self.num_replicas]
        assert len(shard) == self.num_samples
        return iter(shard[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
| 2,292 | 37.216667 | 103 | py |
Diverse-ViT | Diverse-ViT-main/loss_scaler.py | """ CUDA / AMP utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
try:
from apex import amp
has_apex = True
except ImportError:
amp = None
has_apex = False
from timm.utils import *
__all__ = ['NativeScaler']
class NativeScaler:
    """Thin wrapper around ``torch.cuda.amp.GradScaler`` for AMP training.

    Mirrors timm's NativeScaler except that ``__call__`` forwards extra
    positional arguments (``f``, ``data_iter``, ``data_loader``) to the
    scaler's ``step`` and returns its result.
    NOTE(review): the stock ``GradScaler.step`` signature is
    ``step(optimizer, *args)`` with the extras passed on to
    ``optimizer.step`` — confirm the optimizer in use accepts them.
    """

    state_dict_key = "amp_scaler"

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, f, data_iter, data_loader, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if clip_grad is not None:
            assert parameters is not None
            # Unscale in-place so gradient clipping sees true magnitudes.
            self._scaler.unscale_(optimizer)
            dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)
        data_iter = self._scaler.step(optimizer, f, data_iter, data_loader)
        self._scaler.update()
        return data_iter

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
Diverse-ViT | Diverse-ViT-main/gradient_utils.py | import torch
from torch import nn
from gradinit_optimizers import RescaleAdam
import numpy as np
import os
class Scale(torch.nn.Module):
    """Learnable scalar multiplier: ``y = w * x`` with ``w`` initialised to 1."""

    def __init__(self):
        super(Scale, self).__init__()
        self.weight = torch.nn.Parameter(torch.ones(1))

    def forward(self, x):
        return self.weight * x
class Bias(torch.nn.Module):
    """Learnable scalar offset: ``y = x + b`` with ``b`` initialised to 0."""

    def __init__(self):
        super(Bias, self).__init__()
        self.bias = torch.nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return self.bias + x
def get_ordered_params(net):
    """Collect trainable tensors in module-traversal order.

    For every Conv2d/Linear/LayerNorm: weight then (if present) bias; plus
    the scalar of each Scale/Bias module. The ordering must match the
    gradient lists consumed by ``take_opt_step``.
    """
    params = []
    for mod in net.modules():
        if isinstance(mod, (nn.Conv2d, nn.Linear, nn.LayerNorm)):
            params.append(mod.weight)
            if mod.bias is not None:
                params.append(mod.bias)
        elif isinstance(mod, Scale):
            params.append(mod.weight)
        elif isinstance(mod, Bias):
            params.append(mod.bias)
    return params
def set_param(module, name, alg, eta, grad):
    """Replace parameter ``name`` on ``module`` with a one-step update.

    The original Parameter is stashed as ``<name>_prev`` and the attribute
    is rebound to a plain tensor ``prev - eta * step`` where ``step`` is
    ``grad`` for 'sgd' and ``grad.sign()`` for 'adam' (signSGD-style).

    Raises:
        RuntimeError: for an unknown ``alg``.
    """
    weight = getattr(module, name)
    # Detach the Parameter from the module's parameter dict.
    del module._parameters[name]
    if alg.lower() == 'sgd':
        gstep = eta * grad
    elif alg.lower() == 'adam':
        gstep = eta * grad.sign()
    else:
        raise RuntimeError("Optimization algorithm {} not defined!".format(alg))
    # Keep the pre-step value reachable for recover_params().
    module.register_parameter(name + '_prev', weight)
    # Rebind the attribute to the stepped (non-Parameter) tensor.
    setattr(module, name, weight - gstep.data)
def take_opt_step(net, grad_list, alg='adam', eta=0.1):
    """Take the initial step of the chosen optimizer.

    Consumes ``grad_list`` in the order produced by get_ordered_params()
    and rewrites each affected attribute via set_param().

    Refactor: the grad-fetch / cursor-advance logic was duplicated four
    times; it is now a single helper.
    """
    assert alg.lower() in ['adam', 'sgd']
    idx = 0

    def _apply(module, attr_name):
        # apply one gradient step and advance the shared grad cursor
        nonlocal idx
        set_param(module, attr_name, alg, eta, grad_list[idx])
        idx += 1

    for _, m in net.named_modules():
        if isinstance(m, (nn.Conv2d, nn.Linear, nn.LayerNorm)):
            _apply(m, 'weight')
            if m.bias is not None:
                _apply(m, 'bias')
        elif isinstance(m, Scale):
            _apply(m, 'weight')
        elif isinstance(m, Bias):
            _apply(m, 'bias')
def recover_params(net):
    """Undo take_opt_step(): restore each parameter saved under ``<name>_prev``."""

    def _restore(module, attr_name):
        # drop the temporary stepped tensor and re-register the saved parameter
        delattr(module, attr_name)
        setattr(module, attr_name, getattr(module, attr_name + '_prev'))
        del module._parameters[attr_name + '_prev']

    for _, module in net.named_modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.LayerNorm)):
            _restore(module, 'weight')
            if module.bias is not None:
                _restore(module, 'bias')
        elif isinstance(module, Scale):
            _restore(module, 'weight')
        elif isinstance(module, Bias):
            _restore(module, 'bias')
def set_bn_modes(net):
    """Put every BatchNorm2d in *net* into training mode without tracking running stats."""
    for module in net.modules():
        if isinstance(module, nn.BatchNorm2d):
            module.training = True
            module.track_running_stats = False
def recover_bn_modes(net):
    """Re-enable running-stat tracking on every BatchNorm2d in *net*."""
    for module in net.modules():
        if isinstance(module, nn.BatchNorm2d):
            module.track_running_stats = True
def get_scale_stats(model, optimizer):
    """Summarise the 'alpha' scale factors stored in *optimizer*'s state.

    Returns max/min/mean over every optimised parameter ('s_*') and over
    the non-bias parameters of *model* only ('s_weight_*').
    """
    stats = {}
    all_alphas = [
        optimizer.state[p]['alpha']
        for group in optimizer.param_groups
        for p in group['params']
        if 'alpha' in optimizer.state[p]
    ]
    stats['s_max'] = max(all_alphas)
    stats['s_min'] = min(all_alphas)
    stats['s_mean'] = np.mean(all_alphas)

    weight_alphas = [
        optimizer.state[p]['alpha']
        for name, p in model.named_parameters()
        if 'bias' not in name and 'alpha' in optimizer.state[p]
    ]
    stats['s_weight_max'] = max(weight_alphas)
    stats['s_weight_min'] = min(weight_alphas)
    stats['s_weight_mean'] = np.mean(weight_alphas)
    return stats
def get_batch(data_iter, data_loader):
    """Draw the next (inputs, targets) batch, restarting the loader when exhausted.

    Returns ``(data_iter, inputs, targets)`` with the tensors moved to the GPU;
    the (possibly fresh) iterator is handed back so the caller keeps cycling.
    """
    try:
        inputs, targets = next(data_iter)
    except StopIteration:
        # Fix: was a bare ``except:`` which also swallowed KeyboardInterrupt
        # and real errors raised inside the loader. Only iterator exhaustion
        # should trigger a restart.
        data_iter = iter(data_loader)
        inputs, targets = next(data_iter)
    inputs, targets = inputs.cuda(), targets.cuda()
    return data_iter, inputs, targets
def gradinit(net, dataloader,
             gradinit_lr=1e-6, gradinit_min_scale=0.01, gradinit_grad_clip=1,
             gradinit_gamma=1, gradinit_eta=0.1, batch_no_overlap=False, gradinit_iters=2000):
    """Learn per-parameter init scales by minimising the post-step loss.

    Each iteration: compute grads on one minibatch, take a virtual optimizer
    step (take_opt_step), evaluate the loss on a second minibatch, and use
    that loss (divided by eta) as the objective for RescaleAdam, which only
    rescales the existing parameters. When the grad norm exceeds
    ``gradinit_gamma``, a constraint step shrinks the scales instead.

    NOTE(review): the logging condition below repeats
    ``total_iters == gradinit_iters`` twice; the duplicate is redundant.
    ``total_residual`` and ``residual`` are assigned but never updated/used.
    """
    # if isinstance(net, torch.nn.DataParallel):
    #     net_top = net.module
    # else:
    #     net_top = net
    # Separate scale groups: biases may scale down to 0, weights are floored.
    bias_params = [p for n, p in net.named_parameters() if 'bias' in n]
    weight_params = [p for n, p in net.named_parameters() if 'weight' in n]
    optimizer = RescaleAdam([{'params': weight_params, 'min_scale': gradinit_min_scale, 'lr': gradinit_lr},
                             {'params': bias_params, 'min_scale': 0, 'lr': gradinit_lr}],
                            grad_clip=gradinit_grad_clip)
    criterion = nn.CrossEntropyLoss()
    net.eval()  # This further shuts down dropout, if any.
    # running accumulators for logging only
    total_loss, total_l0, total_l1, total_residual, total_gnorm = 0, 0, 0, 0, 0
    total_sums, total_sums_gnorm = 0, 0
    cs_count = 0
    total_iters = 0
    obj_loss, updated_loss, residual = -1, -1, -1
    data_iter = iter(dataloader)
    # get all the parameters by order
    params_list = get_ordered_params(net)
    while True:
        eta = gradinit_eta
        # continue
        # get the first half of the minibatch
        data_iter, init_inputs_0, init_targets_0 = get_batch(data_iter, dataloader)
        # Get the second half of the data.
        data_iter, init_inputs_1, init_targets_1 = get_batch(data_iter, dataloader)
        init_inputs = torch.cat([init_inputs_0, init_inputs_1])
        init_targets = torch.cat([init_targets_0, init_targets_1])
        # compute the gradient and take one step
        outputs = net(init_inputs)
        init_loss = criterion(outputs, init_targets)
        # create_graph=True: the virtual step must stay differentiable w.r.t. scales
        all_grads = torch.autograd.grad(init_loss, params_list, create_graph=True)
        # # Compute the loss w.r.t. the optimizer
        # if args.gradinit_alg.lower() == 'adam':
        #     # grad-update inner product
        # L1 norm of all gradients (matches the adam sign-step geometry)
        gnorm = sum([g.abs().sum() for g in all_grads])
        loss_grads = all_grads
        # else:
        #     gnorm_sq = sum([g.square().sum() for g in all_grads])
        #     gnorm = gnorm_sq.sqrt()
        #     if args.gradinit_normalize_grad:
        #         loss_grads = [g / gnorm for g in all_grads]
        #     else:
        #         loss_grads = all_grads
        total_gnorm += gnorm.item()
        total_sums_gnorm += 1
        if gnorm.item() > gradinit_gamma:
            # project back into the gradient norm constraint
            optimizer.zero_grad()
            gnorm.backward()
            optimizer.step(is_constraint=True)
            cs_count += 1
        else:
            # take one optimization step
            take_opt_step(net, loss_grads, eta=eta)
            total_l0 += init_loss.item()
            data_iter, inputs_2, targets_2 = get_batch(data_iter, dataloader)
            if batch_no_overlap:
                # sample a new batch for the half
                data_iter, init_inputs_0, init_targets_0 = get_batch(data_iter, dataloader)
            updated_inputs = torch.cat([init_inputs_0, inputs_2])
            updated_targets = torch.cat([init_targets_0, targets_2])
            # compute loss using the updated network
            # net_top.opt_mode(True)
            updated_outputs = net(updated_inputs)
            # net_top.opt_mode(False)
            updated_loss = criterion(updated_outputs, updated_targets)
            # If eta is larger, we should expect obj_loss to be even smaller.
            obj_loss = updated_loss / eta
            # undo the virtual step before updating the scales
            recover_params(net)
            optimizer.zero_grad()
            obj_loss.backward()
            optimizer.step(is_constraint=False)
            total_l1 += updated_loss.item()
            total_loss += obj_loss.item()
            total_sums += 1
        total_iters += 1
        if (total_sums_gnorm > 0 and total_sums_gnorm % 10 == 0) or total_iters == gradinit_iters or total_iters == gradinit_iters:
            stat_dict = get_scale_stats(net, optimizer)
            print_str = "Iter {}, obj iters {}, eta {:.3e}, constraint count {} loss: {:.3e} ({:.3e}), init loss: {:.3e} ({:.3e}), update loss {:.3e} ({:.3e}), " \
                        "total gnorm: {:.3e} ({:.3e})\t".format(
                total_sums_gnorm, total_sums, eta, cs_count,
                float(obj_loss), total_loss / total_sums if total_sums > 0 else -1,
                float(init_loss), total_l0 / total_sums if total_sums > 0 else -1,
                float(updated_loss), total_l1 / total_sums if total_sums > 0 else -1,
                float(gnorm), total_gnorm / total_sums_gnorm)
            for key, val in stat_dict.items():
                print_str += "{}: {:.2e}\t".format(key, val)
            print(print_str)
        if total_iters == gradinit_iters:
            break
| 9,598 | 34.420664 | 163 | py |
Diverse-ViT | Diverse-ViT-main/mix.py | """ Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2020 Ross Wightman
"""
import numpy as np
import torch
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
    """One-hot encode *x*, then tile the encoding 196 times along a trailing
    axis (one copy per 14x14 image patch). Returns shape (N, num_classes, 196).
    """
    labels = x.long().view(-1, 1)
    table = torch.full((labels.size(0), num_classes), off_value, device=device)
    table.scatter_(1, labels, on_value)
    return table.unsqueeze(2).repeat(1, 1, 196)
def mixup_target(target, num_classes, lam, patch_lam, smoothing=0.0, device='cuda'):
    """Build mixed soft labels for mixup/cutmix.

    Returns ``(patch_targets, targets)`` where patch_targets has shape
    (N, 196, num_classes) mixed per patch via *patch_lam*, and targets has
    shape (N, num_classes) mixed globally via *lam*.
    """
    off_value = smoothing / num_classes
    on_value = 1. - smoothing + off_value
    fwd = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
    rev = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
    # per-patch (cutmix-aware) mixture, and the scalar global mixture
    patch_y = patch_lam * fwd + (1. - patch_lam) * rev
    global_y = lam * fwd[:, :, 0] + (1. - lam) * rev[:, :, 0]
    return patch_y.permute(0, 2, 1), global_y
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
bbox_area = (yh - yl) * (xh - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return yl, yh, xl, xh, lam
def patch_wise_lam(img_shape, yl, yh, xl, xh, device='cuda'):
    """Per-patch mixing ratios for a CutMix box.

    Zeroes the region [yl:yh, xl:xh] on a mask of ones sized like the image
    and returns the mean mask value of each patch, flattened to shape (196,).
    Assumes a 224x224 image split into 14x14 patches of 16x16 pixels
    (matching the ViT models in this repo) -- other sizes raise on reshape.

    Fixes: the local result variable shadowed the function name, and the
    14x14 Python double loop is replaced by an exact vectorized block mean
    (sums of 0/1 values over 256-element power-of-two tiles are exact in
    float32, so the values are bit-identical).
    """
    binary_mask = torch.ones(img_shape[-2:], device=device)
    binary_mask[yl:yh, xl:xh] = 0
    # reshape to (14, 16, 14, 16): index [py, dy, px, dx] addresses pixel
    # (py*16+dy, px*16+dx); averaging over the tile dims gives per-patch means
    per_patch = binary_mask.reshape(14, 16, 14, 16).mean(dim=(1, 3))
    return per_patch.flatten()
class Mixup_diversity:
    """ Mixup/Cutmix that applies one set of params to the whole batch,
    returning patch-wise soft labels in addition to the usual mixed targets.

    Args:
        mixup_alpha (float): mixup beta-distribution alpha, mixup is active if > 0.
        cutmix_alpha (float): cutmix beta-distribution alpha, cutmix is active if > 0.
        prob (float): probability of applying mixup or cutmix per batch
        switch_prob (float): probability of switching to cutmix instead of mixup
        label_smoothing (float): label smoothing applied to the mixed target tensor
        num_classes (int): number of classes for target

    Fix: tensors were always allocated on a hard-coded 'cuda' device; they now
    follow the input batch's device, so CPU batches work too (CUDA behavior
    unchanged).
    """

    def __init__(self, mixup_alpha=0.8, cutmix_alpha=1.0, prob=1.0, switch_prob=0.5,
                 label_smoothing=0.1, num_classes=1000):
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.mix_prob = prob
        self.switch_prob = switch_prob
        self.label_smoothing = label_smoothing
        self.num_classes = num_classes

    def _params_per_batch(self):
        # randomly choose mixup vs cutmix and draw the mixing coefficient
        use_cutmix = np.random.rand() < self.switch_prob
        lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
            np.random.beta(self.mixup_alpha, self.mixup_alpha)
        return float(lam_mix), use_cutmix

    def _mix_batch(self, x):
        # mixes x in place; returns (per-patch lambdas of shape (196,), scalar lambda)
        lam, use_cutmix = self._params_per_batch()
        if use_cutmix:
            yl, yh, xl, xh, lam = rand_bbox(x.shape, lam)
            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]  # x=0, x_flip=1
            patch_lam = patch_wise_lam(x.shape, yl, yh, xl, xh, device=x.device)
        else:
            x_flipped = x.flip(0).mul_(1. - lam)
            x.mul_(lam).add_(x_flipped)
            patch_lam = torch.ones(196, device=x.device) * lam
        return patch_lam, lam

    def __call__(self, x, target):
        assert len(x) % 2 == 0, 'Batch size should be even when using this'
        patch_lam, lam = self._mix_batch(x)
        patch_target, target = mixup_target(target, self.num_classes, lam, patch_lam,
                                            self.label_smoothing, device=x.device)
        return x, patch_target, target
| 5,266 | 42.528926 | 120 | py |
pennylane-ls | pennylane-ls-master/setup.py | from setuptools import setup
# PennyLane device entry points, "<short_name> = <module>:<DeviceClass>".
pennylane_devices_list = [
    "synqs.sqs = pennylane_ls:SingleQuditDevice",
    "synqs.mqs = pennylane_ls:MultiQuditDevice",
    "synqs.fs = pennylane_ls:FermionDevice",
]

setup(
    name="pennylane-ls",
    version="0.3.0",
    description="A Pennylane plugin for cold atom quantum simulators",
    url="https://www.github.com/synqs/pennylane-ls",
    author="Fred Jendrzejewski",
    author_email="fnj@kip.uni-heidelberg.de",
    license="BSD-2",
    packages=["pennylane_ls"],
    zip_safe=False,
    install_requires=[
        "pennylane >= 0.16",
        "numpy",
    ],
    entry_points={
        "pennylane.plugins": pennylane_devices_list
    },  # for registering the pennylane device(s)
)
| 731 | 26.111111 | 70 | py |
pennylane-ls | pennylane-ls-master/heroku_credentials.py | # each user has his own credentials file. Do not share this with other users.
# NOTE(review): plaintext credentials committed to the repository -- these
# should live outside version control (environment variables or an ignored
# local config file), and this password should be rotated.
username = "synqs_test"  # Put here your username
password = "Cm2TXfRmXennMQ5"  # and the pwd
| 173 | 33.8 | 77 | py |
pennylane-ls | pennylane-ls-master/examples/example_credentials.py | # each user has his own credentials file. Do not share this with other users.
# Template: copy this file to your own (untracked) credentials file and fill
# in your account details.
username = "EXAMPLE-NAME"  # Put here your username
password = "EXAMPLE-PASSWORD"  # and the pwd
| 176 | 34.4 | 77 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/multi_qudit_device.py | """
A device that allows us to implement operation on multiple qudits.
The backend is a remote simulator.
"""
import json
import requests
import numpy as np
from .django_device import DjangoDevice
# observables
from .multi_qudit_ops import LZ, ZObs
# operations
from .multi_qudit_ops import RLX, RLZ, RLZ2, RLXLY, RLZLZ, Load
# classes
from .multi_qudit_ops import MultiQuditOperation
class MultiQuditDevice(DjangoDevice):
    """
    The multi qudit device class, which is remotely calling the simulator.

    Gate instructions are buffered into ``job_payload`` and only submitted to
    the server when a measurement (sample/expval) is requested.
    """

    ## Define operation map for the experiment
    name = "Multi Qudit Quantum Simulator plugin"
    pennylane_requires = ">=0.16.0"
    version = "0.0.1"
    author = "Fred Jendrzejewski"

    short_name = "synqs.mqs"

    # PennyLane observable / operation names -> plugin classes
    _observable_map = {"LZ": LZ, "ZObs": ZObs}

    _operation_map = {
        "RLX": RLX,
        "RLZ": RLZ,
        "RLZ2": RLZ2,
        "RLXLY": RLXLY,
        "RLZLZ": RLZLZ,
        "Load": Load,
    }

    def __init__(
        self,
        wires=1,
        shots=1,
        url="http://qsimsim.synqs.org/api/multiqudit/",
        username=None,
        password=None,
        job_id=None,
        blocking=True,
    ):
        """
        Create the device; credentials and the server url are forwarded to
        the DjangoDevice base class.
        """
        super().__init__(
            url=url,
            wires=wires,
            shots=shots,
            username=username,
            password=password,
            blocking=blocking,
            job_id=job_id,
        )
        # default qudit dimension; overwritten when a Load gate is applied
        self.qdim = 2

    @classmethod
    def capabilities(cls):
        capabilities = super().capabilities().copy()
        capabilities.update(
            model="qudit",
            supports_finite_shots=True,
            supports_tensor_observables=True,
            returns_probs=False,
        )
        return capabilities

    def pre_apply(self):
        # start a fresh payload holding the single experiment we will submit
        self.reset()
        self.job_payload = {
            "experiment_0": {
                "instructions": [],
                "num_wires": len(self.wires),
                "shots": self.shots,
            },
        }

    def apply(self, operation, wires, par):
        """
        Append the gate's backend instruction to the buffered job payload.
        """
        # check with different operations
        operation_class = self._operation_map[operation]
        if issubclass(operation_class, MultiQuditOperation):
            l_obj, qdim = operation_class.qudit_operator(par, wires)
            # qdim is only non zero if the load gate is implied.
            # so only in this case we will change it.
            if qdim:
                self.qdim = qdim
            self.job_payload["experiment_0"]["instructions"].append(l_obj)
        else:
            raise NotImplementedError()

    def expval(self, observable, wires, par):
        """
        Retrieve the requested observable expectation value.

        In non-blocking mode this may return the sentinel string
        "Job_not_done" or the job id instead of a number.
        """
        try:
            if self.job_id is None:
                self.sample(observable, wires, par)
            if self.check_job_status() != "DONE":
                return "Job_not_done"
            shots = self.sample(observable, wires, par)
            return np.mean(shots, axis=0)
        except ValueError as exc:
            raise NotImplementedError() from exc

    def sample(self, observable, wires, par):
        """
        Submit the buffered job (on first call) and fetch the raw samples,
        returned as an array of shape (shots, n_measured_wires).
        """
        # submit the job
        if self.job_id is None:
            wires = wires if isinstance(wires, list) else [wires]
            for _, name in enumerate(wires):
                m_obj = ("measure", [name.labels[0]], [])
                self.job_payload["experiment_0"]["instructions"].append(m_obj)
            url = self.url_prefix + "post_job/"
            job_response = requests.post(
                url,
                data={
                    "json": json.dumps(self.job_payload),
                    "username": self.username,
                    "password": self.password,
                },
            )
            self.job_id = (job_response.json())["job_id"]
            if self.blocking:
                self.wait_till_done()
            else:
                # non-blocking: hand the job id back so the caller can poll
                return self.job_id
        if self.blocking:
            self.wait_till_done()
        elif self.check_job_status() != "DONE":
            return self.job_id
        # obtain the job result
        result_payload = {"job_id": self.job_id}
        url = self.url_prefix + "get_job_result/"
        result_response = requests.get(
            url,
            params={
                "json": json.dumps(result_payload),
                "username": self.username,
                "password": self.password,
            },
        )
        results_dict = json.loads(result_response.text)
        results = results_dict["results"][0]["data"]["memory"]
        # each memory entry is a whitespace-separated string of integer outcomes
        num_obs = len(wires)
        out = np.zeros((self.shots, num_obs))
        for i1 in np.arange(self.shots):
            temp = results[i1].split()
            for i2 in np.arange(num_obs):
                out[i1, i2] = int(temp[i2])
        return out

    def reset(self):
        # forget the previous job so the next run submits a fresh one
        self.job_id = None
| 5,045 | 26.275676 | 78 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/single_qudit_ops.py | """
Define the operations that can be applied on a single_qudit device.
"""
from typing import List, Tuple
import abc
from pennylane.operation import Operation
from pennylane.operation import Observable
import numpy as np
class SingleQuditOperation(Operation):
    """
    A base class for all the single qudit operations that will later inherit from it.
    """

    @classmethod
    @abc.abstractmethod
    def qudit_operator(cls, par: List[float]) -> Tuple:
        """Transform the gate parameters into the appropriate backend
        instruction tuple.

        Args:
            par: parameter for the gate
        """
        raise NotImplementedError()
class SingleQuditObservable(Observable):
    """
    A base class for all the single qudit observables that will later inherit from it.
    """

    @classmethod
    @abc.abstractmethod
    def qudit_operator(cls, samples: np.ndarray, qdim: List[int]):
        """Transform the received samples into the appropriate observable
        values.

        Args:
            samples: a numpy array of samples
            qdim: the dimension of the qudit
        """
        raise NotImplementedError()
class Load(SingleQuditOperation):
    """Load gate: put a given number of atoms into the qudit, fixing its dimension."""

    num_params = 1
    num_wires = 1
    par_domain = "N"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par):
        # backend instruction plus the resulting qudit dimension (n_atoms + 1 levels)
        return ("load", [0], par), par[0] + 1
class RLX(SingleQuditOperation):
    """Rotation of the qudit around the x-axis."""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par):
        # wrap the rotation angle into [0, 2*pi) before sending it to the backend
        return ("rlx", [0], [par[0] % (2 * np.pi)]), False
class RLZ(SingleQuditOperation):
    """Rotation of the qudit around the z-axis."""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par):
        # wrap the rotation angle into [0, 2*pi) before sending it to the backend
        return ("rlz", [0], [par[0] % (2 * np.pi)]), False
class RLZ2(SingleQuditOperation):
    """Rotation generated by Lz squared (one-axis twisting)."""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par):
        # NOTE(review): unlike RLX/RLZ here (and RLZ2 in multi_qudit_ops), the
        # angle is forwarded unwrapped -- confirm whether a 2*pi wrap is intended.
        return ("rlz2", [0], par), False
class ID(SingleQuditOperation):
    """Custom gate"""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par):
        # Stub: returns None, which callers expecting an (instruction, qdim)
        # pair cannot unpack. ID is not registered in any device's
        # _operation_map, so this path is currently unreachable.
        pass
class ZObs(SingleQuditObservable):
    """Raw occupation-number observable (samples returned unshifted)."""

    num_params = 0
    num_wires = 1
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, samples, qdim):
        # identity transform on the measured samples
        return samples
class LZ(SingleQuditObservable):
    """Lz observable: measured samples shifted by half the qudit dimension."""

    num_params = 0
    num_wires = 1
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, samples, qdim):
        # centre the occupation numbers around zero
        return samples - qdim / 2
class LZ2(SingleQuditObservable):
    """Lz squared observable."""

    num_params = 0
    num_wires = 1
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, samples, qdim):
        # square of the centred occupation numbers
        return (samples - qdim / 2) ** 2
| 3,314 | 18.96988 | 86 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/multi_qudit_ops.py | """
Define the operations that can be applied on a multi_qudit device.
"""
from typing import List, Tuple
import abc
from pennylane.operation import Operation
from pennylane.operation import Observable
import numpy as np
class MultiQuditOperation(Operation):
    """
    A base class for all the multi qudit operations that will later inherit from it.
    """

    @classmethod
    @abc.abstractmethod
    def qudit_operator(cls, par: List[float], wires: List[int]) -> Tuple:
        """Transform the gate parameters into the appropriate backend
        instruction tuple.

        Args:
            par: parameter for the gate
            wires: The wires onto which we should apply the gates.
        """
        raise NotImplementedError()
class MultiQuditObservable(Observable):
    """
    A base class for all the multi qudit observables that will later inherit from it.
    """

    @classmethod
    @abc.abstractmethod
    def qudit_operator(cls, samples: List[int], qdim: List[int]):
        """Transform the received samples into the appropriate observable
        values.

        Args:
            samples: a numpy array of samples
            qdim: the dimension of the qudit we are working with
        """
        raise NotImplementedError()
## Single qudit gates
class Load(MultiQuditOperation):
    """Load gate: put a given number of atoms onto one wire, fixing its qudit dimension."""

    num_params = 1
    num_wires = 1
    par_domain = "N"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par, wires):
        # backend instruction plus the resulting qudit dimension (n_atoms + 1 levels)
        return ("load", [wires[0]], par), par[0] + 1
class RLX(MultiQuditOperation):
    """Rotation of one qudit around the x-axis."""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par, wires):
        # wrap the rotation angle into [0, 2*pi)
        return ("rlx", [wires[0]], [par[0] % (2 * np.pi)]), False
class RLZ(MultiQuditOperation):
    """Rotation of one qudit around the z-axis."""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par, wires):
        # wrap the rotation angle into [0, 2*pi)
        return ("rlz", [wires[0]], [par[0] % (2 * np.pi)]), False
class RLZ2(MultiQuditOperation):
    """Rotation generated by Lz squared (one-axis twisting) on one qudit."""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par, wires):
        # wrap the rotation angle into [0, 2*pi)
        return ("rlz2", [wires[0]], [par[0] % (2 * np.pi)]), False
class ID(MultiQuditOperation):
    """Identity gate"""

    num_params = 1
    num_wires = 1
    par_domain = "R"

    grad_method = None
    grad_recipe = None

    @classmethod
    def qudit_operator(cls, par, wires):
        # Stub: returns None, which callers expecting an (instruction, qdim)
        # pair cannot unpack. ID is not registered in any device's
        # _operation_map, so this path is currently unreachable.
        pass
## Two qudit gates
class RLXLY(MultiQuditOperation):
    """LxLy or FlipFlop gate"""

    num_params = 1
    num_wires = 2
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, par, wires):
        # two-qudit exchange coupling between the two given wires; angle
        # wrapped into [0, 2*pi)
        return ("rlxly", [wires[0], wires[1]], [par[0] % (2 * np.pi)]), False
class RLZLZ(MultiQuditOperation):
    """LzLz or generalized Ising gate"""

    num_params = 1
    num_wires = 2
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, par, wires):
        # two-qudit Ising-type coupling; angle wrapped into [0, 2*pi)
        return ("rlzlz", [wires[0], wires[1]], [par[0] % (2 * np.pi)]), False
## Observables
class ZObs(MultiQuditObservable):
    """Number of atoms operator"""

    num_params = 0
    num_wires = 1
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, samples, qdim):
        # raw occupation numbers, no shift
        return samples
class LZ(MultiQuditObservable):
    """Lz observable"""

    num_params = 0
    num_wires = 1
    par_domain = "R"

    @classmethod
    def qudit_operator(cls, samples, qdim):
        # centre the occupation numbers around zero
        return samples - qdim / 2
| 3,960 | 20.069149 | 85 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/_version.py | """Version information.
Version number (major.minor.patch[-label])
"""
# NOTE(review): the "[-label]" brackets of the documented format appear to have
# been kept literally in the value; setup.py ships plain "0.3.0" -- confirm the
# intended dev-version string.
__version__ = "0.3.0[-dev]"
| 103 | 16.333333 | 45 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/fermion_device.py | """
A device that allows us to implement operation ons a fermion tweezer experiments.
The backend is a remote simulator.
"""
import json
from collections import OrderedDict
import numpy as np
import requests
from pennylane import DeviceError
from .django_device import DjangoDevice
# observables
from .fermion_ops import ParticleNumber
# operations
from .fermion_ops import Load, HartreeFock, Hop, Inter, Phase, PauliZ, Identity
# observations
from .fermion_ops import FermionObservable, FermionOperation
class FermionDevice(DjangoDevice):
    """
    The device that allows us to implement operations on a fermion tweezer
    experiment (at most 8 wires). The backend is a remote simulator; gate
    instructions are buffered and submitted in pre_measure().
    """

    ## Define operation map for the experiment
    # Several PennyLane names alias the same backend gate (e.g. Tunneling == Hop).
    _operation_map = {
        "Load": Load,
        "Hop": Hop,
        "Tunneling": Hop,
        "Inter": Inter,
        "OnSiteInteraction": Inter,
        "Phase": Phase,
        "ChemicalPotential": Phase,
        "HartreeFock": HartreeFock,
    }

    name = "Fermion Quantum Simulator Simulator plugin"
    pennylane_requires = ">=0.16.0"
    version = "0.2.0"
    author = "Rohit P. Bhatt, Christian Gogolin, Fred Jendrzejewski, Valentin Kasper"

    short_name = "synqs.fs"

    _observable_map = {
        "ParticleNumber": ParticleNumber,
        "PauliZ": PauliZ,
        "Identity": Identity,
    }

    # pylint: disable=R0913
    def __init__(
        self,
        wires=8,
        shots=1,
        url="http://qsimsim.synqs.org/api/fermions/",
        username=None,
        password=None,
        job_id=None,
        blocking=True,
    ):
        """
        Create the device; credentials and the server url are forwarded to
        the DjangoDevice base class. Raises ValueError for more than 8 wires.
        """
        super().__init__(
            url=url,
            wires=wires,
            shots=shots,
            username=username,
            password=password,
            blocking=blocking,
            job_id=job_id,
        )
        if not self.num_wires <= 8:
            raise ValueError("Number of wires may be at most 8")
        # raw 0/1 occupation samples of shape (shots, num_wires), filled in pre_measure()
        self._samples = None

    @classmethod
    def capabilities(cls):
        capabilities = super().capabilities().copy()
        capabilities.update(
            model="fermions",
            supports_finite_shots=True,
            supports_tensor_observables=True,
            returns_probs=False,
        )
        return capabilities

    def apply(self, operation, wires, par):
        """
        Append the gate's backend instruction(s) to the buffered job payload.
        """
        # check with different operations
        operation_class = self._operation_map[operation]
        if issubclass(operation_class, FermionOperation):
            l_obj = operation_class.fermion_operator(wires, par)
            # a gate may expand into one instruction or a list of them
            if not isinstance(l_obj, list):
                l_obj = [l_obj]
            for l_obj_element in l_obj:
                self.job_payload["experiment_0"]["instructions"].append(l_obj_element)
        else:
            raise NotImplementedError()

    def expval(self, observable=None, wires=None, par=None):
        """
        Retrieve the requested observable expectation value from the samples.
        """
        if self._observable_map[observable] == Identity:
            return 1.0
        shots = self.sample(observable, wires, par)
        if self._observable_map[observable] == PauliZ:
            # map occupations {0, 1} onto eigenvalues {+1, -1}
            shots = np.ones(shots.shape) - 2 * shots
        mean = np.mean(shots, axis=0)
        result = mean[wires.tolist()]
        return result.item() if len(result) == 1 else result

    def var(self, observable=None, wires=None, par=None):
        """
        Retrieve the requested observable variance from the samples.
        """
        if self._observable_map[observable] == Identity:
            return 0.0
        shots = self.sample(observable, wires, par)
        if self._observable_map[observable] == PauliZ:
            # map occupations {0, 1} onto eigenvalues {+1, -1}
            shots = np.ones(shots.shape) - 2 * shots
        var = np.var(shots, axis=0)
        result = var[wires.tolist()]
        return result.item() if len(result) == 1 else result

    def sample(self, observable, wires, par):
        """
        Return the raw samples recorded by pre_measure().
        """
        observable_class = self._observable_map[observable]
        if issubclass(observable_class, FermionObservable):
            return self._samples
        raise NotImplementedError()

    def probability(self, wires=None):
        """
        Generates the probability distribution for all observed outcomes,
        as an OrderedDict mapping bit tuples to relative frequencies.
        """
        # pylint: disable=R1728
        shots = self._samples
        if wires is not None:
            shots = shots[:, wires.tolist()]
        patterns, counts = np.unique(shots, axis=0, return_counts=True)
        probabilities = np.zeros(2 ** len(wires))
        denominator = counts.sum()
        for pattern, count in zip(patterns, counts):
            probability = count / denominator
            # index by the integer value of the bit pattern (last wire = LSB)
            probabilities[
                sum(2 ** idx for idx, d in enumerate(pattern[::-1]) if d == 1)
            ] = probability
        patterns = [
            tuple([int(d) for d in bin(comp_state_index)[2:].zfill(len(wires))])
            for comp_state_index in range(2 ** len(wires))
        ]
        return OrderedDict(zip(patterns, probabilities))

    # pylint: disable=R1710
    def pre_measure(self):
        """
        Append measure instructions, submit the job and (when blocking)
        download the results into ``self._samples``.
        """
        wires = self.wires
        for wire in wires:
            m_obj = ("measure", [wire], [])
            self.job_payload["experiment_0"]["instructions"].append(m_obj)
        url = self.url_prefix + "post_job/"
        job_response = requests.post(
            url,
            data={
                "json": json.dumps(self.job_payload),
                "username": self.username,
                "password": self.password,
            },
        )
        self.job_id = (job_response.json())["job_id"]
        if self.blocking is True:
            self.wait_till_done()
        else:
            # non-blocking: hand the job id back so the caller can poll
            return self.job_id
        # obtain the job result
        result_payload = {"job_id": self.job_id}
        url = self.url_prefix + "get_job_result/"
        result_response = requests.get(
            url,
            params={
                "json": json.dumps(result_payload),
                "username": self.username,
                "password": self.password,
            },
        )
        results_dict = json.loads(result_response.text)
        if "results" not in results_dict:
            raise DeviceError(result_response.text)
        results = results_dict["results"][0]["data"]["memory"]
        # each memory entry is a whitespace-separated string of 0/1 outcomes
        num_obs = len(wires)
        out = np.zeros((self.shots, num_obs), dtype=int)
        for ind_1 in np.arange(self.shots):
            temp = results[ind_1].split()
            for ind_2 in np.arange(num_obs):
                out[ind_1, ind_2] = int(temp[ind_2])
        self._samples = out

    def reset(self):
        # forget samples and job id so the next run submits a fresh job
        self._samples = None
        self.job_id = None
| 6,874 | 28.633621 | 86 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/single_qudit_device.py | """
A device that allows us to implement operation on a single qudit. The backend is a remote simulator.
"""
import json
import requests
import numpy as np
from .django_device import DjangoDevice
# observables
from .single_qudit_ops import LZ, LZ2, ZObs
# operations
from .single_qudit_ops import RLX, RLZ, RLZ2, Load
# classes
from .single_qudit_ops import SingleQuditObservable, SingleQuditOperation
# operations for local devices
class SingleQuditDevice(DjangoDevice):
    """
    The single qudit device class, which is remotely calling the simulator.

    Gate instructions are buffered into ``job_payload`` and only submitted
    when a measurement (sample/expval/var) is requested.
    """

    ## Define operation map for the experiment
    _operation_map = {"RLX": RLX, "RLZ": RLZ, "RLZ2": RLZ2, "Load": Load}

    name = "Single Qudit Quantum Simulator Simulator plugin"
    pennylane_requires = ">=0.16.0"
    version = "0.0.1"
    author = "Fred Jendrzejewski"

    short_name = "synqs.sqs"

    _observable_map = {"LZ": LZ, "ZObs": ZObs, "LZ2": LZ2}

    def __init__(
        self,
        shots=1,
        username=None,
        url="http://qsimsim.synqs.org/api/singlequdit/",
        password=None,
        job_id=None,
        blocking=True,
    ):
        """
        Create the single-wire device; credentials and the server url are
        forwarded to the DjangoDevice base class.
        """
        super().__init__(
            url=url,
            wires=1,
            shots=shots,
            username=username,
            password=password,
            blocking=blocking,
            job_id=job_id,
        )
        # default qudit dimension; overwritten when a Load gate is applied
        self.qdim = 2

    def apply(self, operation, wires, par):
        """
        Append the gate's backend instruction to the buffered job payload.
        """
        # check with different operations
        operation_class = self._operation_map[operation]
        if issubclass(operation_class, SingleQuditOperation):
            l_obj, qdim = operation_class.qudit_operator(par)
            # qdim is only non zero if the load gate is implied.
            # so only in this case we will change it.
            if qdim:
                self.qdim = qdim
            self.job_payload["experiment_0"]["instructions"].append(l_obj)
        else:
            raise NotImplementedError()

    def expval(self, observable, wires, par):
        """
        Retrieve the requested observable expectation value.

        In non-blocking mode this may return the sentinel string
        "Job_not_done" instead of a number.
        """
        try:
            if self.job_id is None:
                self.sample(observable, wires, par)
            if self.check_job_status() != "DONE":
                return "Job_not_done"
            shots = self.sample(observable, wires, par)
            return shots.mean()
        except ValueError as exc:
            raise NotImplementedError() from exc

    def var(self, observable, wires, par):
        """
        Retrieve the requested observable variance.
        """
        try:
            if self.job_id is None:
                self.sample(observable, wires, par)
            if self.check_job_status() != "DONE":
                return "Job_not_done"
            shots = self.sample(observable, wires, par)
            return shots.var()
        except ValueError as exc:
            raise NotImplementedError() from exc

    def sample(self, observable, wires, par):
        """
        Submit the buffered job (on first call) and fetch the samples,
        transformed by the observable class (e.g. shifted for LZ).
        """
        observable_class = self._observable_map[observable]
        if issubclass(observable_class, SingleQuditObservable):
            # submit the job
            if self.job_id is None:
                m_obj = ("measure", [0], [])
                url = self.url_prefix + "post_job/"
                self.job_payload["experiment_0"]["instructions"].append(m_obj)
                job_response = requests.post(
                    url,
                    data={
                        "json": json.dumps(self.job_payload),
                        "username": self.username,
                        "password": self.password,
                    },
                )
                self.job_id = (job_response.json())["job_id"]
                if self.blocking is True:
                    self.wait_till_done()
                else:
                    # non-blocking: hand the job id back so the caller can poll
                    return self.job_id
            if self.blocking is True:
                self.wait_till_done()
            elif self.check_job_status() != "DONE":
                return self.job_id
            # obtain the job result
            result_payload = {"job_id": self.job_id}
            url = self.url_prefix + "get_job_result/"
            result_response = requests.get(
                url,
                params={
                    "json": json.dumps(result_payload),
                    "username": self.username,
                    "password": self.password,
                },
            )
            results_dict = json.loads(result_response.text)
            shots = results_dict["results"][0]["data"]["memory"]
            shots = np.array([int(shot) for shot in shots])
            # and give back the appropriate observable.
            shots = observable_class.qudit_operator(shots, self.qdim)
            return shots
        raise NotImplementedError()

    def reset(self):
        # forget qudit dimension, job id and payload for the next run
        self.qdim = 2
        self.job_id = None
        self.job_payload = None
| 5,096 | 29.520958 | 100 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/__init__.py | """
The initialization of the `pennylane-ls` module
"""
from .single_qudit_device import SingleQuditDevice
from .multi_qudit_device import MultiQuditDevice
from .fermion_device import FermionDevice
from ._version import __version__
| 233 | 25 | 50 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/django_device.py | """
Define the base class for communication with the Django API as laid out for
`labscript-qc`
"""
import time
import json
import requests
from pennylane import Device
class DjangoDevice(Device):
    """
    Base class for all PennyLane devices that talk to a remote Django API.

    Concrete subclasses populate ``_operation_map`` / ``_observable_map``
    and implement the actual apply/measure logic; this class only handles
    authentication, job submission bookkeeping and status polling.
    """
    _operation_map = {}
    _observable_map = {}
    # pylint: disable=R0913
    def __init__(
        self,
        url: str,
        wires=1,
        shots=1,
        username=None,
        password=None,
        job_id=None,
        blocking=True,
    ):
        """
        Args:
            url: prefix of the backend REST endpoints (e.g. ``.../api/``).
            wires: number of wires of the device.
            shots: number of shots per experiment.
            username / password: credentials sent with every request.
            job_id: optionally resume an already-submitted job.
            blocking: if True, calls block until the remote job is DONE.
        """
        super().__init__(wires=wires, shots=shots)
        self.username = username
        self.password = password
        self.blocking = blocking
        self.job_id = job_id
        self.url_prefix = url
        self.job_payload = {}
    def check_job_status(self) -> str:
        """
        Query the backend for the status of the current job.

        Raises:
            SyntaxError: when the backend reports ``ERROR`` (the detail
                string from the server is used as the message).
        """
        status_payload = {"job_id": self.job_id}
        url = self.url_prefix + "get_job_status/"
        status_response = requests.get(
            url,
            params={
                "json": json.dumps(status_payload),
                "username": self.username,
                "password": self.password,
            },
        )
        # NOTE(review): .json() is parsed twice here; harmless but redundant.
        job_status = (status_response.json())["status"]
        job_status_detail = (status_response.json())["detail"]
        if job_status == "ERROR":
            raise SyntaxError(job_status_detail)
        return job_status
    def wait_till_done(self):
        """
        Block the program, polling every 2 seconds, until the job is DONE.
        """
        while True:
            time.sleep(2)
            job_status = self.check_job_status()
            if job_status == "DONE":
                break
    def pre_apply(self):
        """
        Reset device state and set up the payload dictionary that will
        later be sent to the server.
        """
        self.reset()
        # NOTE(review): num_wires is hard-coded to 1 here regardless of the
        # device's wire count — presumably subclasses overwrite it; confirm.
        self.job_payload = {
            "experiment_0": {
                "instructions": [],
                "num_wires": 1,
                "shots": self.shots,
                "wire_order": "interleaved",
            },
        }
    @property
    def operations(self):
        # Supported gate names, as declared by the subclass.
        return set(self._operation_map.keys())
    @property
    def observables(self):
        # Supported observable names, as declared by the subclass.
        return set(self._observable_map.keys())
| 2,341 | 24.182796 | 79 | py |
pennylane-ls | pennylane-ls-master/pennylane_ls/fermion_ops.py | """
Define the operations that can be applied on a fermionic device.
"""
import abc
from typing import List, Tuple
from pennylane.wires import Wires
from pennylane.operation import Operation, AnyWires, AllWires
from pennylane.operation import Observable
import numpy as np
class FermionOperation(Operation):
    """
    Abstract base class for all fermionic operations (gates).
    """
    @classmethod
    @abc.abstractmethod
    def fermion_operator(cls, wires: List[int], par: List[int]) -> Tuple:
        """Serialize the gate into the backend's instruction format.

        Args:
            wires: the wire(s) the gate should be applied to
            par: the gate parameters

        Returns:
            a ``(name, wires, params)`` instruction tuple.
        """
        raise NotImplementedError()
class FermionObservable(Observable):
    """
    Abstract base class for all fermionic observables.
    """
    @classmethod
    @abc.abstractmethod
    def fermion_operator(cls, samples: np.ndarray):
        """Transform the raw shot samples into this observable's values.

        Args:
            samples: a numpy array of shot samples returned by the backend
        """
        raise NotImplementedError()
class Load(FermionOperation):
    r"""Load a single fermionic particle onto one wire.

    This is a state-preparation instruction: the targeted wire starts the
    experiment occupied by exactly one particle.

    Args:
        wires (int): index of the wire to occupy.

    **Example**

        FermionicDevice = FermionDevice(shots=5, username=username, password=password)

        @qml.qnode(FermionicDevice)
        def quantum_circuit(alpha=0):
            Load(wires=0)
            Load(wires=3)
            return qml.sample(ParticleNumber(wires=FermionicDevice.wires))
    """
    num_params = 0
    num_wires = 1
    par_domain = None
    grad_method = None
    grad_recipe = None
    @classmethod
    def fermion_operator(cls, wires, par):
        # Serialize into the backend's (name, wires, params) triple.
        return ("load", wires.tolist(), [])
class HartreeFock(FermionOperation):
    """Prepare the Hartree-Fock reference state.

    Loads ``nalpha`` particles of the first spin species onto the lowest
    even wires and ``nbeta`` of the second species onto the lowest odd
    wires (interleaved wire ordering).
    """
    num_params = 2
    num_wires = AllWires
    par_domain = "I"
    @classmethod
    def fermion_operator(cls, wires, par):
        nalpha, nbeta = par
        instructions = []
        for idx, wire in enumerate(wires):
            # Even indices carry the alpha species, odd indices the beta one.
            site, spin = divmod(idx, 2)
            occupied = site < nalpha if spin == 0 else site < nbeta
            if occupied:
                instructions.append(Load.fermion_operator(Wires(wire), None))
        return instructions
class Hop(FermionOperation):
    # pylint: disable=C0301
    r"""The hopping gate.

    Moves fermionic amplitude between two sites (both spin species),
    implementing

    .. math::
        G^{\mathrm{hop}}_{j,k}(\theta) = \exp(-i \theta/2 \sum_{\sigma}(c_{j,\sigma}^\dagger c_{k,\sigma} + \text{h.c}))

    Args:
        wires (int): the four indices of the wires that are coupled.
        par (float): the angle of the gate.

    **Example**

        FermionicDevice = FermionDevice(shots=5, username=username, password=password)

        @qml.qnode(FermionicDevice)
        def quantum_circuit(alpha=0):
            Load(wires=0)
            Hop(alpha, wires=[0, 1, 2, 3])
            return qml.sample(ParticleNumber(wires=FermionicDevice.wires))
    """
    num_params = 1
    num_wires = 4
    par_domain = "R"
    grad_method = None
    grad_recipe = None
    @classmethod
    def fermion_operator(cls, wires, par):
        # Backend convention stores theta/2, wrapped into [0, 2*pi).
        angle = par[0] / 2 % (2 * np.pi)
        return ("fhop", wires.tolist(), [angle])
class Inter(FermionOperation):
    r"""The on-site interaction gate.

    Applies the density-density interaction on every site simultaneously:

    .. math::
        G^{\mathrm{int}}(\theta) = \exp(-i \theta \sum_{j=0}^{n-1} n_{j,\uparrow} n_{j,\downarrow})

    Args:
        wires (int): the indices of the wires that are coupled.
        par (float): the angle of the gate.

    **Example**

        FermionicDevice = FermionDevice(shots=5, username=username, password=password)

        @qml.qnode(FermionicDevice)
        def quantum_circuit(alpha=0):
            Load(wires=0)
            Load(wires=1)
            Inter(alpha, wires=[0, 1, 2, 3, 4, 5, 6, 7])
            return qml.sample(ParticleNumber(wires=FermionicDevice.wires))
    """
    num_params = 1
    num_wires = AllWires  # acts on every wire of the device
    par_domain = "R"
    grad_method = None
    grad_recipe = None
    @classmethod
    def fermion_operator(cls, wires, par):
        # Angle is wrapped into [0, 2*pi) before serialization.
        return ("fint", wires.tolist(), [par[0] % (2 * np.pi)])
class Phase(FermionOperation):
    r"""The phase gate (local chemical potential).

    Implements

    .. math::
        G^{\mathrm{int}}_j(\theta) = \exp(-i \theta (n_{j,\uparrow} + n_{j,\downarrow}))

    Args:
        wires (int): the two wires (both spin species of one site).
        par (float): the angle of the gate.

    **Example**

        FermionicDevice = FermionDevice(shots=5, username=username, password=password)

        @qml.qnode(FermionicDevice)
        def quantum_circuit(alpha=0):
            Load(wires=0)
            Load(wires=1)
            Phase(alpha, wires=[0, 1])
            return qml.sample(ParticleNumber(wires=FermionicDevice.wires))
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    grad_method = None
    grad_recipe = None
    @classmethod
    def fermion_operator(cls, wires, par):
        # Angle is wrapped into [0, 2*pi) before serialization.
        return ("fphase", wires.tolist(), [par[0] % (2 * np.pi)])
class ParticleNumber(FermionObservable):
    r"""ParticleNumber observable

    Occupation number of every requested wire; the backend's shot memory
    already contains occupation numbers, so samples pass through unchanged.

    Args:
        arg1 (int): number of the wire
    **Example**
    FermionicDevice = FermionDevice(shots = 5, username = username, password = password)
    @qml.qnode(FermionicDevice)
    def quantum_circuit(alpha=0):
        Load(wires = 0)
        Load(wires = 3)
        return qml.sample(ParticleNumber(wires=FermionicDevice.wires))
    """
    num_params = 0
    num_wires = AnyWires
    par_domain = None
    @classmethod
    def fermion_operator(cls, samples):
        # Identity transform: raw samples are already particle numbers.
        return samples
class PauliZ(FermionObservable):
    """PauliZ observable

    Documented as the expectation value of 1-2*ParticleNumber on a
    specific wire.
    NOTE(review): fermion_operator returns the samples unchanged, so the
    1-2*n mapping is presumably applied elsewhere (e.g. server-side) —
    confirm against the device implementation.
    """
    num_params = 0
    num_wires = 1
    par_domain = None
    @classmethod
    def fermion_operator(cls, samples):
        return samples
class Identity(FermionObservable):
    """Identity observable

    Expectation value of the identity operator; samples pass through
    unchanged.
    """
    num_params = 0
    num_wires = AnyWires
    par_domain = None
    @classmethod
    def fermion_operator(cls, samples):
        return samples
| 6,782 | 24.02952 | 120 | py |
pennylane-ls | pennylane-ls-master/tests/test_multi_qudit.py | """
Tests for the multi qudit device.
"""
import unittest
import numpy as np
import pennylane as qml
from pennylane_ls import multi_qudit_ops
class TestMultiQuditDevice(unittest.TestCase):
    """
    Test case for the multi qudit device.

    NOTE(review): these tests hit a live remote backend, and the
    credentials are committed in source — consider moving them to
    environment variables.
    """
    def setUp(self):
        # Shared device fixture; blocking=True makes results synchronous.
        self.username = "synqs_test"
        self.password = "Cm2TXfRmXennMQ5"
        self.test_device = qml.device(
            "synqs.mqs",
            shots=50,
            username=self.username,
            password=self.password,
            blocking=True,
        )
    def test_creation(self):
        """
        Test the creation of the device and its advertised gate set.
        """
        test_device = qml.device("synqs.mqs")
        self.assertEqual(
            test_device.operations, {"RLXLY", "RLZLZ", "Load", "RLX", "RLZ", "RLZ2"}
        )
    def test_rX_gate(self):
        """
        Test the rX gate: a pi rotation after loading 50 atoms flips all of them.
        """
        @qml.qnode(self.test_device)
        def quantum_circuit():
            multi_qudit_ops.Load(50, wires=0)
            multi_qudit_ops.RLX(np.pi, wires=0)
            return qml.expval(multi_qudit_ops.ZObs(0))
        res = quantum_circuit()
        self.assertEqual(int(res), 50)
| 1,179 | 23.081633 | 84 | py |
pennylane-ls | pennylane-ls-master/tests/test_fermion_device.py | """
Tests for the femrion device.
"""
import unittest
import numpy as np
import pennylane as qml
from pennylane_ls import fermion_ops
class TestFermionDevice(unittest.TestCase):
    """
    Test case for the fermion device.

    NOTE(review): these tests hit a live remote backend, and the
    credentials are committed in source — consider moving them to
    environment variables.
    """
    def setUp(self):
        # Shared device fixture; blocking=True makes results synchronous.
        self.username = "synqs_test"
        self.password = "Cm2TXfRmXennMQ5"
        self.test_device = qml.device(
            "synqs.fs",
            shots=50,
            username=self.username,
            password=self.password,
        blocking=True,
        ) if False else qml.device(
            "synqs.fs",
            shots=50,
            username=self.username,
            password=self.password,
            blocking=True,
        )
    def test_creation(self):
        """
        Test that it is possible to create the device.
        """
        test_device = qml.device("synqs.fs")
        self.assertEqual(
            test_device.operations,
            {
                "ChemicalPotential",
                "HartreeFock",
                "Hop",
                "Inter",
                "Load",
                "OnSiteInteraction",
                "Phase",
                "Tunneling",
            },
        )
    def test_creation_with_user(self):
        """
        Test creation with username
        """
        test_device = qml.device(
            "synqs.fs",
            shots=50,
            username=self.username,
            password=self.password,
            blocking=False,
        )
        self.assertEqual(
            test_device.operations,
            {
                "ChemicalPotential",
                "HartreeFock",
                "Hop",
                "Inter",
                "Load",
                "OnSiteInteraction",
                "Phase",
                "Tunneling",
            },
        )
    def test_load_gate(self):
        """
        Test the load gate: loading wires 0 and 1 occupies exactly those wires.
        """
        @qml.qnode(self.test_device)
        def simple_loading():
            """
            The circuit that simulates the experiments.
            """
            # load atoms
            fermion_ops.Load(wires=0)
            fermion_ops.Load(wires=1)
            obs = fermion_ops.ParticleNumber([0, 1, 2, 3])
            return qml.expval(obs)
        res = simple_loading()
        self.assertListEqual(list(res), [1.0, 1.0, 0.0, 0.0])
    def test_hop_gate(self):
        """
        Test the hopping gate: a pi hop moves both particles to wires 2 and 3.
        """
        @qml.qnode(self.test_device)
        def simple_hopping():
            """
            The circuit that simulates the experiments.
            theta ... angle of the hopping
            """
            # load atoms
            fermion_ops.Load(wires=0)
            fermion_ops.Load(wires=1)
            fermion_ops.Hop(np.pi, wires=[0, 1, 2, 3])
            obs = fermion_ops.ParticleNumber([0, 1, 2, 3])
            return qml.expval(obs)
        res = simple_hopping()
        self.assertListEqual(list(res), [0.0, 0.0, 1.0, 1.0])
    def test_probs(self):
        """
        Test the probability return: wire 3 is occupied with certainty.
        """
        @qml.qnode(self.test_device)
        def simple_hopping():
            """
            The circuit that simulates the experiments.
            theta ... angle of the hopping
            """
            # load atoms
            fermion_ops.Load(wires=0)
            fermion_ops.Load(wires=1)
            fermion_ops.Hop(np.pi, wires=[0, 1, 2, 3])
            fermion_ops.ParticleNumber([0, 1, 2, 3])
            return qml.probs(wires=[3])
        res = simple_hopping()
        self.assertListEqual(list(res), [0.0, 1.0])
    def test_var(self):
        """
        Test that the variance is properly defined (deterministic outcome -> 0).
        """
        @qml.qnode(self.test_device)
        def simple_hopping():
            """
            The circuit that simulates the experiments.
            theta ... angle of the hopping
            """
            # load atoms
            fermion_ops.Load(wires=0)
            fermion_ops.Load(wires=1)
            fermion_ops.Hop(np.pi, wires=[0, 1, 2, 3])
            obs = fermion_ops.ParticleNumber([0, 1, 2, 3])
            return qml.var(obs)
        res = simple_hopping()
        self.assertListEqual(list(res), [0.0, 0.0, 0.0, 0.0])
| 4,096 | 24.93038 | 61 | py |
pennylane-ls | pennylane-ls-master/tests/test_single_qudit.py | """
Tests for the single qudit device.
"""
import unittest
import numpy as np
import pennylane as qml
from pennylane_ls import single_qudit_ops
class TestSingleQuditDevice(unittest.TestCase):
    """
    Test case for the single qudit device.

    NOTE(review): these tests hit a live remote backend, and the
    credentials are committed in source — consider moving them to
    environment variables.
    """
    def setUp(self):
        # Shared device fixture; blocking=True makes results synchronous.
        self.username = "synqs_test"
        self.password = "Cm2TXfRmXennMQ5"
        self.test_device = qml.device(
            "synqs.sqs",
            shots=50,
            username=self.username,
            password=self.password,
            blocking=True,
        )
    def test_creation(self):
        """
        Make sure that we can create the device.
        """
        test_device = qml.device("synqs.sqs")
        self.assertEqual(test_device.operations, {"Load", "RLX", "RLZ", "RLZ2"})
    def test_creation_with_user(self):
        """
        Test creation with username
        """
        test_device = qml.device(
            "synqs.sqs",
            shots=50,
            username=self.username,
            password=self.password,
            blocking=True,
        )
        self.assertEqual(test_device.operations, {"Load", "RLX", "RLZ", "RLZ2"})
    def test_load_gate(self):
        """
        Test the load gate.
        NOTE(review): this body is identical to test_rX_gate — it should
        probably assert the loaded count WITHOUT the RLX rotation.
        """
        @qml.qnode(self.test_device)
        def quantum_circuit():
            single_qudit_ops.Load(50, wires=0)
            single_qudit_ops.RLX(np.pi, wires=0)
            return qml.expval(single_qudit_ops.ZObs(0))
        res = quantum_circuit()
        self.assertEqual(int(res), 50)
    def test_rX_gate(self):
        """
        Test the rX gate: a pi rotation after loading 50 atoms flips all of them.
        """
        @qml.qnode(self.test_device)
        def quantum_circuit():
            single_qudit_ops.Load(50, wires=0)
            single_qudit_ops.RLX(np.pi, wires=0)
            return qml.expval(single_qudit_ops.ZObs(0))
        res = quantum_circuit()
        self.assertEqual(int(res), 50)
| 1,902 | 24.373333 | 80 | py |
GATNE | GATNE-master/src/main.py | import math
import os
import sys
import time
import numpy as np
import tensorflow as tf
from numpy import random
from utils import *
def get_batches(pairs, neighbors, batch_size):
    """Yield mini-batches of training pairs as int32 numpy arrays.

    Each batch is a tuple ``(heads, tails, edge_types, head_neighbors)``
    where ``tails`` has shape ``(batch, 1)`` and ``head_neighbors`` holds
    the pre-sampled neighbor rows of the head nodes. The final batch may
    be smaller than ``batch_size``.
    """
    for start in range(0, len(pairs), batch_size):
        chunk = pairs[start:start + batch_size]
        heads = [p[0] for p in chunk]
        tails = [p[1] for p in chunk]
        types = [p[2] for p in chunk]
        neigh = [neighbors[h] for h in heads]
        yield (
            np.array(heads).astype(np.int32),
            np.array(tails).reshape(-1, 1).astype(np.int32),
            np.array(types).astype(np.int32),
            np.array(neigh).astype(np.int32),
        )
def train_model(network_data, feature_dic, log_name):
    """Train the GATNE model (TensorFlow 1.x graph mode) with early stopping.

    Args:
        network_data: dict mapping edge type -> list of (u, v) edges.
        feature_dic: optional dict mapping node id -> attribute vector
            (enables the attributed GATNE-I variant); None for GATNE-T.
        log_name: name of the TensorBoard run directory under ./runs.

    Returns:
        (test ROC-AUC, test F1, test PR-AUC) at the best validation epoch.

    Note: reads the module-level ``args`` and ``file_name`` as well as the
    valid/test edge dictionaries defined in the __main__ block.
    """
    vocab, index2word, train_pairs = generate(network_data, args.num_walks, args.walk_length, args.schema, file_name, args.window_size, args.num_workers, args.walk_file)
    edge_types = list(network_data.keys())
    num_nodes = len(index2word)
    edge_type_count = len(edge_types)
    epochs = args.epoch
    batch_size = args.batch_size
    embedding_size = args.dimensions  # Dimension of the embedding vector.
    embedding_u_size = args.edge_dim
    u_num = edge_type_count
    num_sampled = args.negative_samples  # Number of negative examples to sample.
    dim_a = args.att_dim
    att_head = 1
    neighbor_samples = args.neighbor_samples
    neighbors = generate_neighbors(network_data, vocab, num_nodes, edge_types, neighbor_samples)
    graph = tf.Graph()
    if feature_dic is not None:
        # Build the dense node-attribute matrix aligned with vocab indices.
        feature_dim = len(list(feature_dic.values())[0])
        print('feature dimension: ' + str(feature_dim))
        features = np.zeros((num_nodes, feature_dim), dtype=np.float32)
        for key, value in feature_dic.items():
            if key in vocab:
                features[vocab[key].index, :] = np.array(value)
    with graph.as_default():
        global_step = tf.Variable(0, name='global_step', trainable=False)
        if feature_dic is not None:
            # GATNE-I: embeddings are linear transforms of raw features.
            node_features = tf.Variable(features, name='node_features', trainable=False)
            feature_weights = tf.Variable(tf.truncated_normal([feature_dim, embedding_size], stddev=1.0))
            embed_trans = tf.Variable(tf.truncated_normal([feature_dim, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
            u_embed_trans = tf.Variable(tf.truncated_normal([edge_type_count, feature_dim, embedding_u_size], stddev=1.0 / math.sqrt(embedding_size)))
        else:
            # GATNE-T: free base and per-edge-type embeddings.
            node_embeddings = tf.Variable(tf.random_uniform([num_nodes, embedding_size], -1.0, 1.0))
            node_type_embeddings = tf.Variable(tf.random_uniform([num_nodes, u_num, embedding_u_size], -1.0, 1.0))
        trans_weights = tf.Variable(tf.truncated_normal([edge_type_count, embedding_u_size, embedding_size // att_head], stddev=1.0 / math.sqrt(embedding_size)))
        trans_weights_s1 = tf.Variable(tf.truncated_normal([edge_type_count, embedding_u_size, dim_a], stddev=1.0 / math.sqrt(embedding_size)))
        trans_weights_s2 = tf.Variable(tf.truncated_normal([edge_type_count, dim_a, att_head], stddev=1.0 / math.sqrt(embedding_size)))
        nce_weights = tf.Variable(tf.truncated_normal([num_nodes, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([num_nodes]))
        # Input data
        train_inputs = tf.placeholder(tf.int32, shape=[None])
        train_labels = tf.placeholder(tf.int32, shape=[None, 1])
        train_types = tf.placeholder(tf.int32, shape=[None])
        node_neigh = tf.placeholder(tf.int32, shape=[None, edge_type_count, neighbor_samples])
        # Look up embeddings for nodes
        if feature_dic is not None:
            node_embed = tf.nn.embedding_lookup(node_features, train_inputs)
            node_embed = tf.matmul(node_embed, embed_trans)
        else:
            node_embed = tf.nn.embedding_lookup(node_embeddings, train_inputs)
        # Aggregate neighbor embeddings per edge type (mean aggregator).
        if feature_dic is not None:
            node_embed_neighbors = tf.nn.embedding_lookup(node_features, node_neigh)
            node_embed_tmp = tf.concat([tf.matmul(tf.reshape(tf.slice(node_embed_neighbors, [0, i, 0, 0], [-1, 1, -1, -1]), [-1, feature_dim]), tf.reshape(tf.slice(u_embed_trans, [i, 0, 0], [1, -1, -1]), [feature_dim, embedding_u_size])) for i in range(edge_type_count)], axis=0)
            node_type_embed = tf.transpose(tf.reduce_mean(tf.reshape(node_embed_tmp, [edge_type_count, -1, neighbor_samples, embedding_u_size]), axis=2), perm=[1,0,2])
        else:
            node_embed_neighbors = tf.nn.embedding_lookup(node_type_embeddings, node_neigh)
            node_embed_tmp = tf.concat([tf.reshape(tf.slice(node_embed_neighbors, [0, i, 0, i, 0], [-1, 1, -1, 1, -1]), [1, -1, neighbor_samples, embedding_u_size]) for i in range(edge_type_count)], axis=0)
            node_type_embed = tf.transpose(tf.reduce_mean(node_embed_tmp, axis=2), perm=[1,0,2])
        # Self-attention over edge-type embeddings for the queried edge type.
        trans_w = tf.nn.embedding_lookup(trans_weights, train_types)
        trans_w_s1 = tf.nn.embedding_lookup(trans_weights_s1, train_types)
        trans_w_s2 = tf.nn.embedding_lookup(trans_weights_s2, train_types)
        attention = tf.reshape(tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh(tf.matmul(node_type_embed, trans_w_s1)), trans_w_s2), [-1, u_num])), [-1, att_head, u_num])
        node_type_embed = tf.matmul(attention, node_type_embed)
        node_embed = node_embed + tf.reshape(tf.matmul(node_type_embed, trans_w), [-1, embedding_size])
        if feature_dic is not None:
            node_feat = tf.nn.embedding_lookup(node_features, train_inputs)
            node_embed = node_embed + tf.matmul(node_feat, feature_weights)
        last_node_embed = tf.nn.l2_normalize(node_embed, axis=1)
        # NCE loss over the skip-gram (center, context) pairs.
        loss = tf.reduce_mean(
            tf.nn.nce_loss(
                weights=nce_weights,
                biases=nce_biases,
                labels=train_labels,
                inputs=last_node_embed,
                num_sampled=num_sampled,
                num_classes=num_nodes))
        plot_loss = tf.summary.scalar("loss", loss)
        # Optimizer.
        optimizer = tf.train.AdamOptimizer().minimize(loss, global_step=global_step)
        # Add ops to save and restore all the variables.
        # saver = tf.train.Saver(max_to_keep=20)
        merged = tf.summary.merge_all(key=tf.GraphKeys.SUMMARIES)
        # Initializing the variables
        init = tf.global_variables_initializer()
    # Launch the graph
    print("Optimizing")
    with tf.Session(graph=graph) as sess:
        writer = tf.summary.FileWriter("./runs/" + log_name, sess.graph)  # tensorboard --logdir=./runs
        sess.run(init)
        print('Training')
        g_iter = 0
        best_score = 0
        test_score = (0.0, 0.0, 0.0)
        patience = 0
        for epoch in range(epochs):
            random.shuffle(train_pairs)
            batches = get_batches(train_pairs, neighbors, batch_size)
            data_iter = tqdm(batches,
                            desc="epoch %d" % (epoch),
                            total=(len(train_pairs) + (batch_size - 1)) // batch_size,
                            bar_format="{l_bar}{r_bar}")
            avg_loss = 0.0
            for i, data in enumerate(data_iter):
                feed_dict = {train_inputs: data[0], train_labels: data[1], train_types: data[2], node_neigh: data[3]}
                _, loss_value, summary_str = sess.run([optimizer, loss, merged], feed_dict)
                writer.add_summary(summary_str, g_iter)
                g_iter += 1
                avg_loss += loss_value
                if i % 5000 == 0:
                    post_fix = {
                        "epoch": epoch,
                        "iter": i,
                        "avg_loss": avg_loss / (i + 1),
                        "loss": loss_value
                    }
                    data_iter.write(str(post_fix))
            # Materialize per-edge-type embeddings for every node.
            final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
            for i in range(edge_type_count):
                for j in range(num_nodes):
                    final_model[edge_types[i]][index2word[j]] = np.array(sess.run(last_node_embed, {train_inputs: [j], train_types: [i], node_neigh: [neighbors[j]]})[0])
            # Evaluate link prediction on validation and test splits.
            valid_aucs, valid_f1s, valid_prs = [], [], []
            test_aucs, test_f1s, test_prs = [], [], []
            for i in range(edge_type_count):
                if args.eval_type == 'all' or edge_types[i] in args.eval_type.split(','):
                    tmp_auc, tmp_f1, tmp_pr = evaluate(final_model[edge_types[i]], valid_true_data_by_edge[edge_types[i]], valid_false_data_by_edge[edge_types[i]])
                    valid_aucs.append(tmp_auc)
                    valid_f1s.append(tmp_f1)
                    valid_prs.append(tmp_pr)
                    tmp_auc, tmp_f1, tmp_pr = evaluate(final_model[edge_types[i]], testing_true_data_by_edge[edge_types[i]], testing_false_data_by_edge[edge_types[i]])
                    test_aucs.append(tmp_auc)
                    test_f1s.append(tmp_f1)
                    test_prs.append(tmp_pr)
            print('valid auc:', np.mean(valid_aucs))
            print('valid pr:', np.mean(valid_prs))
            print('valid f1:', np.mean(valid_f1s))
            average_auc = np.mean(test_aucs)
            average_f1 = np.mean(test_f1s)
            average_pr = np.mean(test_prs)
            # Early stopping on mean validation AUC.
            cur_score = np.mean(valid_aucs)
            if cur_score > best_score:
                best_score = cur_score
                test_score = (average_auc, average_f1, average_pr)
                patience = 0
            else:
                patience += 1
                if patience > args.patience:
                    print('Early Stopping')
                    break
    return test_score
if __name__ == "__main__":
    args = parse_args()
    file_name = args.input
    print(args)
    # Optional node attributes enable the GATNE-I variant.
    if args.features is not None:
        feature_dic = load_feature_data(args.features)
    else:
        feature_dic = None
    log_name = file_name.split('/')[-1] + f'_evaltype_{args.eval_type}_b_{args.batch_size}_e_{args.epoch}'
    # Train / validation / test splits live next to each other in the dataset dir.
    training_data_by_type = load_training_data(file_name + '/train.txt')
    valid_true_data_by_edge, valid_false_data_by_edge = load_testing_data(file_name + '/valid.txt')
    testing_true_data_by_edge, testing_false_data_by_edge = load_testing_data(file_name + '/test.txt')
    # A timestamp suffix keeps TensorBoard runs distinct.
    average_auc, average_f1, average_pr = train_model(training_data_by_type, feature_dic, log_name + '_' + time.strftime('%Y-%m-%d %H-%M-%S',time.localtime(time.time())))
    print('Overall ROC-AUC:', average_auc)
    print('Overall PR-AUC', average_pr)
    print('Overall F1:', average_f1)
GATNE | GATNE-master/src/utils.py | import argparse
import multiprocessing
from collections import defaultdict
from operator import index
import numpy as np
from six import iteritems
from sklearn.metrics import (auc, f1_score, precision_recall_curve,
roc_auc_score)
from tqdm import tqdm
from walk import RWGraph
class Vocab(object):
    """Vocabulary entry: occurrence count plus position in ``index2word``."""
    def __init__(self, count, index):
        self.index = index
        self.count = count
def parse_args():
    """Define and parse the command-line arguments for GATNE training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='data/amazon',
                        help='Input dataset path')
    parser.add_argument('--features', type=str, default=None,
                        help='Input node features')
    parser.add_argument('--walk-file', type=str, default=None,
                        help='Input random walks')
    parser.add_argument('--epoch', type=int, default=100,
                        help='Number of epoch. Default is 100.')
    parser.add_argument('--batch-size', type=int, default=64,
                        help='Number of batch_size. Default is 64.')
    parser.add_argument('--eval-type', type=str, default='all',
                        help='The edge type(s) for evaluation.')
    parser.add_argument('--schema', type=str, default=None,
                        help='The metapath schema (e.g., U-I-U,I-U-I).')
    parser.add_argument('--dimensions', type=int, default=200,
                        help='Number of dimensions. Default is 200.')
    parser.add_argument('--edge-dim', type=int, default=10,
                        help='Number of edge embedding dimensions. Default is 10.')
    parser.add_argument('--att-dim', type=int, default=20,
                        help='Number of attention dimensions. Default is 20.')
    parser.add_argument('--walk-length', type=int, default=10,
                        help='Length of walk per source. Default is 10.')
    parser.add_argument('--num-walks', type=int, default=20,
                        help='Number of walks per source. Default is 20.')
    parser.add_argument('--window-size', type=int, default=5,
                        help='Context size for optimization. Default is 5.')
    parser.add_argument('--negative-samples', type=int, default=5,
                        help='Negative samples for optimization. Default is 5.')
    parser.add_argument('--neighbor-samples', type=int, default=10,
                        help='Neighbor samples for aggregation. Default is 10.')
    parser.add_argument('--patience', type=int, default=5,
                        help='Early stopping patience. Default is 5.')
    parser.add_argument('--num-workers', type=int, default=16,
                        help='Number of workers for generating random walks. Default is 16.')
    return parser.parse_args()
def get_G_from_edges(edges):
    """Build an undirected adjacency mapping from an edge list.

    Node identifiers are stringified so the walk code operates on
    uniform keys. Returns ``defaultdict(set)``: node -> neighbor set.
    """
    adjacency = defaultdict(set)
    for edge in edges:
        src, dst = str(edge[0]), str(edge[1])
        adjacency[src].add(dst)
        adjacency[dst].add(src)
    return adjacency
def load_training_data(f_name):
    """Parse a training edge file into ``{edge_type: [(u, v), ...]}``.

    Each line has the form ``<edge_type> <node_u> <node_v>``.

    Args:
        f_name: path to the space-separated edge list.

    Returns:
        dict mapping each edge type to its list of (u, v) node-id pairs.
    """
    print('We are loading data from:', f_name)
    edge_data_by_type = dict()
    all_nodes = list()
    with open(f_name, 'r') as f:
        for line in f:
            # rstrip('\n') instead of line[:-1]: a file without a trailing
            # newline would otherwise lose the last character of the last node.
            words = line.rstrip('\n').split(' ')
            if words[0] not in edge_data_by_type:
                edge_data_by_type[words[0]] = list()
            x, y = words[1], words[2]
            edge_data_by_type[words[0]].append((x, y))
            all_nodes.append(x)
            all_nodes.append(y)
    all_nodes = list(set(all_nodes))
    print('Total training nodes: ' + str(len(all_nodes)))
    return edge_data_by_type
def load_testing_data(f_name):
    """Parse an evaluation edge file into true/false edge dictionaries.

    Each line has the form ``<edge_type> <node_u> <node_v> <label>`` where
    label ``1`` marks a positive (true) edge and anything else a negative.

    Returns:
        (true_edge_data_by_type, false_edge_data_by_type), both mapping
        edge type -> list of (u, v) pairs.
    """
    print('We are loading data from:', f_name)
    true_edge_data_by_type = dict()
    false_edge_data_by_type = dict()
    all_nodes = list()
    with open(f_name, 'r') as f:
        for line in f:
            # rstrip('\n') instead of line[:-1]: without a trailing newline
            # the last line's label would be truncated (int('') crashes).
            words = line.rstrip('\n').split(' ')
            x, y = words[1], words[2]
            if int(words[3]) == 1:
                if words[0] not in true_edge_data_by_type:
                    true_edge_data_by_type[words[0]] = list()
                true_edge_data_by_type[words[0]].append((x, y))
            else:
                if words[0] not in false_edge_data_by_type:
                    false_edge_data_by_type[words[0]] = list()
                false_edge_data_by_type[words[0]].append((x, y))
            all_nodes.append(x)
            all_nodes.append(y)
    all_nodes = list(set(all_nodes))
    return true_edge_data_by_type, false_edge_data_by_type
def load_node_type(f_name):
    """Read a ``<node> <type>`` mapping file into a plain dict."""
    print('We are loading node type from:', f_name)
    mapping = {}
    with open(f_name, 'r') as f:
        for line in f:
            fields = line.strip().split()
            mapping[fields[0]] = fields[1]
    return mapping
def load_feature_data(f_name):
    """Read node features, skipping the header line.

    Returns a dict mapping node id to its list of feature strings.
    """
    features = {}
    with open(f_name, 'r') as f:
        for line_no, line in enumerate(f):
            if line_no == 0:  # header row
                continue
            fields = line.strip().split()
            features[fields[0]] = fields[1:]
    return features
def generate_walks(network_data, num_walks, walk_length, schema, file_name, num_workers):
    """Run random walks on every layer of the multiplex network.

    When a metapath ``schema`` is given, node types are loaded from
    ``<file_name>/node_type.txt`` to constrain the walks.

    Returns:
        list with one list of walks per layer, in ``network_data`` order.
    """
    node_type = load_node_type(file_name + '/node_type.txt') if schema is not None else None
    all_walks = []
    for layer_id, layer_name in enumerate(network_data):
        walker = RWGraph(get_G_from_edges(network_data[layer_name]), node_type, num_workers)
        print('Generating random walks for layer', layer_id)
        all_walks.append(walker.simulate_walks(num_walks, walk_length, schema=schema))
    print('Finish generating the walks')
    return all_walks
def generate_pairs(all_walks, vocab, window_size, num_workers):
    """Turn random walks into (center, context, layer) skip-gram pairs.

    The context window extends ``window_size // 2`` steps to each side.
    ``num_workers`` is accepted for interface compatibility but unused.
    """
    pairs = []
    half_window = window_size // 2
    for layer_id, walks in enumerate(all_walks):
        print('Generating training pairs for layer', layer_id)
        for walk in tqdm(walks):
            length = len(walk)
            for i, word in enumerate(walk):
                center = vocab[word].index
                for offset in range(1, half_window + 1):
                    if i - offset >= 0:
                        pairs.append((center, vocab[walk[i - offset]].index, layer_id))
                    if i + offset < length:
                        pairs.append((center, vocab[walk[i + offset]].index, layer_id))
    return pairs
def generate_vocab(all_walks):
    """Count word frequencies over all walks and build the vocabulary.

    Returns:
        (vocab, index2word) where indices are assigned by descending
        frequency (the most frequent word gets index 0; ties keep first
        occurrence order).
    """
    counts = defaultdict(int)
    for layer_id, walks in enumerate(all_walks):
        print('Counting vocab for layer', layer_id)
        for walk in tqdm(walks):
            for word in walk:
                counts[word] += 1
    # Initial index values are placeholders; they are overwritten below.
    vocab = {word: Vocab(count=freq, index=0) for word, freq in counts.items()}
    index2word = sorted(vocab, key=lambda w: vocab[w].count, reverse=True)
    for rank, word in enumerate(index2word):
        vocab[word].index = rank
    return vocab, index2word
def load_walks(walk_file):
    """Load pre-computed walks from disk.

    Line format: ``<layer_id> <node> <node> ...``.

    Returns:
        list with one list of walks per layer, indexed by layer id.

    The original code appended at most one new layer per line and so
    crashed with IndexError when layer ids skipped a value; gaps are now
    filled with empty layers.
    """
    print('Loading walks')
    all_walks = []
    with open(walk_file, 'r') as f:
        for line in f:
            content = line.strip().split()
            layer_id = int(content[0])
            while layer_id >= len(all_walks):
                all_walks.append([])
            all_walks[layer_id].append(content[1:])
    return all_walks
def save_walks(walk_file, all_walks):
    """Write walks to disk, one line per walk: ``<layer_id> <node> ...``."""
    with open(walk_file, 'w') as f:
        for layer_id, walks in enumerate(all_walks):
            print('Saving walks for layer', layer_id)
            prefix = str(layer_id)
            for walk in tqdm(walks):
                tokens = [prefix]
                tokens.extend(str(node) for node in walk)
                f.write(' '.join(tokens) + '\n')
def generate(network_data, num_walks, walk_length, schema, file_name, window_size, num_workers, walk_file):
    """Produce the vocabulary and skip-gram training pairs for GATNE.

    Reuses walks from ``walk_file`` when given; otherwise generates fresh
    walks and caches them under ``<file_name>/walks.txt``.

    Returns:
        (vocab, index2word, train_pairs)
    """
    if walk_file is None:
        all_walks = generate_walks(network_data, num_walks, walk_length, schema, file_name, num_workers)
        save_walks(file_name + '/walks.txt', all_walks)
    else:
        all_walks = load_walks(walk_file)
    vocab, index2word = generate_vocab(all_walks)
    return vocab, index2word, generate_pairs(all_walks, vocab, window_size, num_workers)
def generate_neighbors(network_data, vocab, num_nodes, edge_types, neighbor_samples):
    """Build a fixed-size neighbor table per node and per edge type.

    Every node gets exactly ``neighbor_samples`` neighbor indices for each
    edge type: isolated nodes are padded with themselves, sparse nodes are
    padded by sampling with replacement, dense nodes are down-sampled.
    Uses the global numpy RNG, so results depend on its state.

    Returns:
        nested list of shape [num_nodes][edge_type_count][neighbor_samples].
    """
    edge_type_count = len(edge_types)
    neighbors = [[[] for __ in range(edge_type_count)] for _ in range(num_nodes)]
    for r in range(edge_type_count):
        print('Generating neighbors for layer', r)
        g = network_data[edge_types[r]]
        # Edges are undirected: register both endpoints as neighbors.
        for (x, y) in tqdm(g):
            ix = vocab[x].index
            iy = vocab[y].index
            neighbors[ix][r].append(iy)
            neighbors[iy][r].append(ix)
        for i in range(num_nodes):
            if len(neighbors[i][r]) == 0:
                # No neighbors in this layer: fall back to self-loops.
                neighbors[i][r] = [i] * neighbor_samples
            elif len(neighbors[i][r]) < neighbor_samples:
                # Pad up to the fixed size by resampling existing neighbors.
                neighbors[i][r].extend(list(np.random.choice(neighbors[i][r], size=neighbor_samples-len(neighbors[i][r]))))
            elif len(neighbors[i][r]) > neighbor_samples:
                # Down-sample to the fixed size.
                neighbors[i][r] = list(np.random.choice(neighbors[i][r], size=neighbor_samples))
    return neighbors
def get_score(local_model, node1, node2):
    """Cosine similarity between two node embeddings.

    Args:
        local_model: dict mapping node id -> embedding vector.
        node1 / node2: node ids (keys of ``local_model``).

    Returns:
        The cosine similarity as a float, or None when either node is
        missing from the model (callers filter out None scores).
    """
    try:
        vector1 = local_model[node1]
        vector2 = local_model[node2]
    except KeyError:
        # Only an unknown node is an expected condition; the previous broad
        # `except Exception: pass` also silently hid genuine numeric bugs.
        return None
    return np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))
def evaluate(model, true_edges, false_edges):
    """Score link prediction for one edge type.

    Predictions are cosine similarities; the binary threshold for F1 is
    chosen so that exactly as many pairs are predicted positive as there
    are scorable true edges.

    Returns:
        (ROC-AUC, F1, PR-AUC)
    """
    true_list = list()
    prediction_list = list()
    true_num = 0
    # Pairs with a node missing from the model are skipped entirely.
    for edge in true_edges:
        score = get_score(model, str(edge[0]), str(edge[1]))
        if score is not None:
            true_list.append(1)
            prediction_list.append(score)
            true_num += 1
    for edge in false_edges:
        score = get_score(model, str(edge[0]), str(edge[1]))
        if score is not None:
            true_list.append(0)
            prediction_list.append(score)
    # Threshold at the true_num-th largest prediction.
    threshold = sorted(prediction_list)[-true_num]
    y_pred = np.zeros(len(prediction_list), dtype=np.int32)
    for i, score in enumerate(prediction_list):
        if score >= threshold:
            y_pred[i] = 1
    y_true = np.array(true_list)
    y_scores = np.array(prediction_list)
    ps, rs, _ = precision_recall_curve(y_true, y_scores)
    return roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps)
| 10,598 | 35.297945 | 123 | py |
GATNE | GATNE-master/src/walk.py | import random
import multiprocessing
from tqdm import tqdm
def walk(args):
    """Worker body: simulate one (optionally schema-guided) random walk.

    ``args`` is ``(walk_length, start_node, schema)``; an empty schema
    string disables metapath filtering. Reads the process-global ``G``
    and ``node_type`` installed by ``initializer``. Returns the walk as a
    list of node-id strings; the walk ends early at a dead end.
    """
    walk_length, start, schema = args
    rng = random.Random()
    if schema:
        schema_items = schema.split('-')
        # A metapath must loop back onto its own starting type.
        assert schema_items[0] == schema_items[-1]
    path = [start]
    while len(path) < walk_length:
        cur = path[-1]
        if schema == '':
            candidates = list(G[cur])
        else:
            wanted = schema_items[len(path) % (len(schema_items) - 1)]
            candidates = [node for node in G[cur] if node_type[node] == wanted]
        if not candidates:
            break
        path.append(rng.choice(candidates))
    return [str(node) for node in path]
def initializer(init_G, init_node_type):
    """Pool initializer: publish the graph and node types as process globals."""
    global G, node_type
    G = init_G
    node_type = init_node_type
class RWGraph():
    """Random-walk generator over one network layer, parallelized with a
    multiprocessing pool (the graph is shared via the pool initializer)."""
    def __init__(self, nx_G, node_type_arr=None, num_workers=16):
        # nx_G: adjacency mapping node -> set of neighbors.
        self.G = nx_G
        # Optional node -> type mapping, required only for schema walks.
        self.node_type = node_type_arr
        self.num_workers = num_workers
    def node_list(self, nodes, num_walks):
        """Yield every node ``num_walks`` times (one walk start per yield)."""
        for loop in range(num_walks):
            for node in nodes:
                yield node
    def simulate_walks(self, num_walks, walk_length, schema=None):
        """Run ``num_walks`` walks of ``walk_length`` from every node.

        With a schema string (comma-separated metapaths), walks are run
        once per metapath, starting only from nodes whose type matches
        the metapath's first element.
        """
        all_walks = []
        nodes = list(self.G.keys())
        random.shuffle(nodes)
        if schema is None:
            # '' as schema disables the metapath filter in the worker.
            with multiprocessing.Pool(self.num_workers, initializer=initializer, initargs=(self.G, self.node_type)) as pool:
                all_walks = list(pool.imap(walk, ((walk_length, node, '') for node in tqdm(self.node_list(nodes, num_walks))), chunksize=256))
        else:
            schema_list = schema.split(',')
            for schema_iter in schema_list:
                with multiprocessing.Pool(self.num_workers, initializer=initializer, initargs=(self.G, self.node_type)) as pool:
                    walks = list(pool.imap(walk, ((walk_length, node, schema_iter) for node in tqdm(self.node_list(nodes, num_walks)) if schema_iter.split('-')[0] == self.node_type[node]), chunksize=512))
                all_walks.extend(walks)
        return all_walks
| 2,121 | 33.786885 | 204 | py |
GATNE | GATNE-master/src/main_pytorch.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import random
from torch.nn.parameter import Parameter
from utils import *
def get_batches(pairs, neighbors, batch_size):
    """Yield mini-batches of (center, context, edge_type, neighbors) tensors.

    Each element of *pairs* is a (center_node, context_node, edge_type)
    triple; *neighbors* maps a node index to its sampled neighbor lists.
    The final batch may be smaller than *batch_size*.
    """
    total = len(pairs)
    for start in range(0, total, batch_size):
        chunk = pairs[start:start + batch_size]
        centers = [p[0] for p in chunk]
        contexts = [p[1] for p in chunk]
        types = [p[2] for p in chunk]
        neighs = [neighbors[p[0]] for p in chunk]
        yield (
            torch.tensor(centers),
            torch.tensor(contexts),
            torch.tensor(types),
            torch.tensor(neighs),
        )
class GATNEModel(nn.Module):
    """GATNE embedding model with per-edge-type attention.

    When *features* is None, free base/edge-type embeddings are learned per
    node; otherwise node features are linearly projected instead
    (transductive vs. feature-based variant of the same architecture).
    """
    def __init__(
        self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features
    ):
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a  # attention hidden dimension
        self.features = None
        if features is not None:
            # Feature mode: project raw features to base / edge-type spaces.
            self.features = features
            feature_dim = self.features.shape[-1]
            self.embed_trans = Parameter(torch.FloatTensor(feature_dim, embedding_size))
            self.u_embed_trans = Parameter(torch.FloatTensor(edge_type_count, feature_dim, embedding_u_size))
        else:
            # Embedding mode: free base and per-edge-type embeddings.
            self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
            self.node_type_embeddings = Parameter(
                torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
            )
        # Per-edge-type projection from the u-space back to embedding space.
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        # Two-layer attention scorer weights (tanh + linear).
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))
        self.reset_parameters()
    def reset_parameters(self):
        # Base/type embeddings start uniform; transforms get scaled normals.
        if self.features is not None:
            self.embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
            self.u_embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        else:
            self.node_embeddings.data.uniform_(-1.0, 1.0)
            self.node_type_embeddings.data.uniform_(-1.0, 1.0)
        self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
    def forward(self, train_inputs, train_types, node_neigh):
        """Return L2-normalised embeddings for the given nodes/edge types.

        train_inputs: node indices (batch,); train_types: one edge-type
        index per node; node_neigh: neighbor indices, presumably shaped
        (batch, edge_type_count, neighbor_samples) — TODO confirm against
        generate_neighbors.
        """
        if self.features is None:
            node_embed = self.node_embeddings[train_inputs]
            node_embed_neighbors = self.node_type_embeddings[node_neigh]
        else:
            node_embed = torch.mm(self.features[train_inputs], self.embed_trans)
            # Project neighbor features under every edge type's matrix at once.
            node_embed_neighbors = torch.einsum('bijk,akm->bijam', self.features[node_neigh], self.u_embed_trans)
        # Keep the diagonal so neighbors sampled for edge type t use the
        # type-t embedding/projection, giving (batch, type, neigh, u_size).
        node_embed_tmp = torch.diagonal(node_embed_neighbors, dim1=1, dim2=3).permute(0, 3, 1, 2)
        # Aggregate neighbors by summation -> per-edge-type context embedding.
        node_type_embed = torch.sum(node_embed_tmp, dim=2)
        trans_w = self.trans_weights[train_types]
        trans_w_s1 = self.trans_weights_s1[train_types]
        trans_w_s2 = self.trans_weights_s2[train_types]
        # Attention over edge types: tanh scoring then softmax across types.
        attention = F.softmax(
            torch.matmul(
                torch.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2
            ).squeeze(2),
            dim=1,
        ).unsqueeze(1)
        node_type_embed = torch.matmul(attention, node_type_embed)
        # Base embedding plus the attended, projected edge-type component.
        node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze(1)
        last_node_embed = F.normalize(node_embed, dim=1)
        return last_node_embed
class NSLoss(nn.Module):
    """Negative-sampling loss with per-node output weights.

    Negatives are drawn from the rank-based distribution
    (log(k + 2) - log(k + 1)) / log(num_nodes + 1), which assumes node
    indices are ordered by decreasing frequency — TODO confirm the vocab
    ordering upstream.
    """
    def __init__(self, num_nodes, num_sampled, embedding_size):
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes
        self.num_sampled = num_sampled
        self.embedding_size = embedding_size
        # One output ("context") weight vector per node.
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        # Sampling weights for negatives; multinomial only needs them up to
        # a constant factor, so the L2 normalisation is harmless.
        self.sample_weights = F.normalize(
            torch.Tensor(
                [
                    (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
                    for k in range(num_nodes)
                ]
            ),
            dim=0,
        )
        self.reset_parameters()
    def reset_parameters(self):
        # Scaled normal init, matching the model's transform matrices.
        self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
    def forward(self, input, embs, label):
        """Return the mean negative-sampling loss for one batch.

        input: center-node indices (used only for the batch size);
        embs: their embeddings; label: positive context-node indices.
        """
        n = input.shape[0]
        # log sigmoid(e . w_pos) for the positive pairs.
        log_target = torch.log(
            torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
        )
        # Draw num_sampled negatives per example.
        negs = torch.multinomial(
            self.sample_weights, self.num_sampled * n, replacement=True
        ).view(n, self.num_sampled)
        noise = torch.neg(self.weights[negs])
        # sum_k log sigmoid(-e . w_neg_k) for the sampled negatives.
        sum_log_sampled = torch.sum(
            torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
        ).squeeze()
        loss = log_target + sum_log_sampled
        return -loss.sum() / n
def train_model(network_data, feature_dic):
    """Train GATNE on *network_data* (edge_type -> edge list) and evaluate.

    Uses the module-level ``args`` plus the validation/test splits loaded in
    ``__main__``.  After every epoch all node embeddings are exported and
    link prediction is scored per edge type; returns the
    (ROC-AUC, F1, PR-AUC) test scores of the epoch with the best validation
    AUC, with early stopping after ``args.patience`` non-improving epochs.
    """
    vocab, index2word, train_pairs = generate(network_data, args.num_walks, args.walk_length, args.schema, file_name, args.window_size, args.num_workers, args.walk_file)
    edge_types = list(network_data.keys())
    num_nodes = len(index2word)
    edge_type_count = len(edge_types)
    epochs = args.epoch
    batch_size = args.batch_size
    embedding_size = args.dimensions
    embedding_u_size = args.edge_dim
    # u_num and att_head are never used below (kept from the reference code).
    u_num = edge_type_count
    num_sampled = args.negative_samples
    dim_a = args.att_dim
    att_head = 1
    neighbor_samples = args.neighbor_samples
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    neighbors = generate_neighbors(network_data, vocab, num_nodes, edge_types, neighbor_samples)
    # Optional dense feature matrix indexed by vocab position.
    features = None
    if feature_dic is not None:
        feature_dim = len(list(feature_dic.values())[0])
        print('feature dimension: ' + str(feature_dim))
        features = np.zeros((num_nodes, feature_dim), dtype=np.float32)
        for key, value in feature_dic.items():
            if key in vocab:
                features[vocab[key].index, :] = np.array(value)
        features = torch.FloatTensor(features).to(device)
    model = GATNEModel(
        num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features
    )
    nsloss = NSLoss(num_nodes, num_sampled, embedding_size)
    model.to(device)
    nsloss.to(device)
    # Both the model and the loss carry trainable parameters.
    optimizer = torch.optim.Adam(
        [{"params": model.parameters()}, {"params": nsloss.parameters()}], lr=1e-4
    )
    best_score = 0
    test_score = (0.0, 0.0, 0.0)
    patience = 0
    for epoch in range(epochs):
        random.shuffle(train_pairs)
        batches = get_batches(train_pairs, neighbors, batch_size)
        data_iter = tqdm(
            batches,
            desc="epoch %d" % (epoch),
            total=(len(train_pairs) + (batch_size - 1)) // batch_size,
            bar_format="{l_bar}{r_bar}",
        )
        avg_loss = 0.0
        for i, data in enumerate(data_iter):
            optimizer.zero_grad()
            # data = (centers, contexts, types, neighbors); see get_batches.
            embs = model(data[0].to(device), data[2].to(device), data[3].to(device),)
            loss = nsloss(data[0].to(device), embs, data[1].to(device))
            loss.backward()
            optimizer.step()
            avg_loss += loss.item()
            if i % 5000 == 0:
                post_fix = {
                    "epoch": epoch,
                    "iter": i,
                    "avg_loss": avg_loss / (i + 1),
                    "loss": loss.item(),
                }
                data_iter.write(str(post_fix))
        # Export embeddings for every node under every edge type.
        final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
        for i in range(num_nodes):
            train_inputs = torch.tensor([i for _ in range(edge_type_count)]).to(device)
            train_types = torch.tensor(list(range(edge_type_count))).to(device)
            node_neigh = torch.tensor(
                [neighbors[i] for _ in range(edge_type_count)]
            ).to(device)
            node_emb = model(train_inputs, train_types, node_neigh)
            for j in range(edge_type_count):
                final_model[edge_types[j]][index2word[i]] = (
                    node_emb[j].cpu().detach().numpy()
                )
        # Per-edge-type link-prediction evaluation on valid and test splits.
        valid_aucs, valid_f1s, valid_prs = [], [], []
        test_aucs, test_f1s, test_prs = [], [], []
        for i in range(edge_type_count):
            if args.eval_type == "all" or edge_types[i] in args.eval_type.split(","):
                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    valid_true_data_by_edge[edge_types[i]],
                    valid_false_data_by_edge[edge_types[i]],
                )
                valid_aucs.append(tmp_auc)
                valid_f1s.append(tmp_f1)
                valid_prs.append(tmp_pr)
                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    testing_true_data_by_edge[edge_types[i]],
                    testing_false_data_by_edge[edge_types[i]],
                )
                test_aucs.append(tmp_auc)
                test_f1s.append(tmp_f1)
                test_prs.append(tmp_pr)
        print("valid auc:", np.mean(valid_aucs))
        print("valid pr:", np.mean(valid_prs))
        print("valid f1:", np.mean(valid_f1s))
        average_auc = np.mean(test_aucs)
        average_f1 = np.mean(test_f1s)
        average_pr = np.mean(test_prs)
        # Model selection on validation AUC; report the matching test scores.
        cur_score = np.mean(valid_aucs)
        if cur_score > best_score:
            best_score = cur_score
            test_score = (average_auc, average_f1, average_pr)
            patience = 0
        else:
            patience += 1
            if patience > args.patience:
                print("Early Stopping")
                break
    return test_score
if __name__ == "__main__":
    args = parse_args()
    file_name = args.input
    print(args)
    # Optional node features; when absent, GATNEModel learns free embeddings.
    if args.features is not None:
        feature_dic = load_feature_data(args.features)
    else:
        feature_dic = None
    # Data splits live as <input>/{train,valid,test}.txt; valid/test carry
    # both true and false (negative) edges.
    training_data_by_type = load_training_data(file_name + "/train.txt")
    valid_true_data_by_edge, valid_false_data_by_edge = load_testing_data(
        file_name + "/valid.txt"
    )
    testing_true_data_by_edge, testing_false_data_by_edge = load_testing_data(
        file_name + "/test.txt"
    )
    average_auc, average_f1, average_pr = train_model(training_data_by_type, feature_dic)
    print("Overall ROC-AUC:", average_auc)
    print("Overall PR-AUC", average_pr)
    print("Overall F1:", average_f1)
| 10,920 | 36.400685 | 169 | py |
Viola-Unet | Viola-Unet-main/main.py | import argparse, os
import time
import numpy as np
import torch
from load_model import load_model, infer_seg, nibout, infer_seg_3
from load_data import load_data, post_process, read_raw_image
from monai.transforms import SaveImaged
from monai.data import decollate_batch
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ICH segmentation of a ct volume')
    parser.add_argument('--input_dir', default='', type=str, metavar='PATH',
                        help='this directory contains all test samples(ct volumes)')
    parser.add_argument('--predict_dir', default='', type=str, metavar='PATH',
                        help='segmentation file of each test sample should be stored in the directory')
    args = parser.parse_args()
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    # Ten-model ensemble: five nnUNet folds + five Viola-UNet folds.
    models = [] # ensemble models, stack together
    models.append(load_model(network="nnUNet", kf="kf0", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf1", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf2", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf3", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf4", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf0", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf1", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf2", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf3", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf4", device=device).eval())
    test_file_list, dataloader = load_data(args.input_dir)
    with torch.no_grad():
        num_scans = len(dataloader)
        for i, d in enumerate(dataloader):
            path, filename = os.path.split(test_file_list[i]['image'])
            # Raw (un-resampled) image is only used for voxel-volume reporting.
            raw_data = read_raw_image(test_file_list[i])
            raw_img = raw_data["image"]
            pixdims = raw_data["image_meta_dict"]["pixdim"][1:4]
            pix_volume = pixdims[0] * pixdims[1] * pixdims[2] # mm^3
            images = d["image"].to(device)
            print('\n------------------start predicting input volume: {0} - {1}/{2} -------------------'.format(filename, i + 1, num_scans))
            # print("image size after preprocessed: ", images.size())
            _, _, h, w, z = images.size()
            print("h, w, z: ", images.size())
            # Larger volumes get more sliding-window overlap, clipped to >= 0.
            # NOTE(review): when max_size <= 160 this yields overlap >= 1.0 —
            # confirm inputs are always larger than one patch.
            max_size = h if h>w else w
            max_size = max_size if max_size > z else z
            overlap = 1-(max_size - 160)/(2*160)
            overlap = 0 if overlap<0 else round(overlap, 2)
            print("overlap: ", overlap)
            ### make sure the last slize is z-axial, z must be smallest number
            reshape=None
            mid_reshape=None # switch w and z
            if h<z and h<w:
                print("We discovered some errors in the head information, tried to fix here but the predict will still not work well in this case...")
                reshape=(0, 1, 4, 3, 2)
                images=images.permute(reshape)
                # print("reshaped input", images.size())
            elif w<z and w<h:
                mid_reshape=(0, 1, 2, 4, 3)
                # images=images.permute(reshape)
                # we can fix the error head infor, but the model were trained with this error case, so at this time, we don't fix it
                # we leave this error for future work
                print("We discovered some errors in the head information without trying to fix it, so the predict will not work well in this case...")
            start_time = time.time()
            pred_outputs = list()
            for m in models: # average logits across all ensemble members
                pred = infer_seg(images, m, overlap=overlap)
                pred_outputs.append(pred)
                if mid_reshape is not None:
                    print("try using corrected oritentation to infer ...")
                    # Also try the w/z-swapped orientation; keep it only if it
                    # segments more foreground voxels than the default pass.
                    pred2 = infer_seg(images.permute(mid_reshape), m, overlap=overlap).permute(mid_reshape)
                    if torch.sum(torch.argmax(torch.softmax(pred, 1), 1))<torch.sum(torch.argmax(torch.softmax(pred2, 1), 1)):
                        pred_outputs.append(pred2)
                    else:
                        print("did not use the corrected oritentation ...")
                # Test-time augmentation boost when (almost) no bleed found.
                if torch.sum(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True))<10.:
                    print('small object, trying to TTA boost ...')
                    pred = infer_seg_3(images, m, flip_axis=[2], overlap=overlap)
                    # print(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True).size())
                    if torch.sum(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True))>10.:
                        # pred_outputs.pop()
                        pred_outputs.append(pred)
                        print('augmented by flip2')
                    pred = infer_seg_3(images, m, overlap=overlap, flip_axis=[1, 2], rot=1)
                    if torch.sum(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True))>10.:
                        # pred_outputs.pop()
                        pred_outputs.append(pred)
                        print('augmented by flip 1-2 and rot 1')
            # Mean of all collected predictions = ensemble (+TTA) output.
            d["pred"] = torch.mean(torch.stack(pred_outputs, dim=0), dim=0, keepdim=True).squeeze(0)
            # print(d["pred"].size())
            if reshape is not None:
                d["pred"] = d["pred"].permute(reshape)
                # print('reshaped size: ',d["pred"].size())
            # Invert the preprocessing so the mask aligns with the raw volume.
            d = [post_process(img) for img in decollate_batch(d)]
            d[0]["pred"] = torch.argmax(d[0]["pred"], 0, keepdim=True)
            lesion_volume = torch.sum(d[0]["pred"]) * pix_volume / 1000.
            print('Predicted lesion volume : {:.3f} ml'.format(lesion_volume))
            d[0]["pred"] = d[0]["pred"].squeeze(0)
            nibout(
                d[0]["pred"].cpu().detach().numpy().astype(np.uint8),
                args.predict_dir,
                test_file_list[i]['image']
            )
            print('--------Cost time: {:.3f} sec --------'.format(time.time() - start_time))
| 6,412 | 49.496063 | 150 | py |
Viola-Unet | Viola-Unet-main/load_model.py | import os
import torch
import nibabel as nib
from monai.inferers import sliding_window_inference
from monai.transforms.utils import map_spatial_axes
from monai.data import decollate_batch
from viola_unet import ViolaUNet
from monai.networks.nets import DynUNet
# Three CT intensity windows — presumably brain, soft-tissue/subdural and a
# wide bone-ish range in HU (TODO confirm); each becomes one input channel.
wind_levels = [[0,100], [-15, 200],[-100, 1300]]
# Target voxel spacing in mm used when resampling inputs — assumed to match
# the training resolution; verify against the training pipeline.
spacing = [0.45100001*2, 0.45100001*2, 4.99709511]
patch_size = (160, 160, 32)  # sliding-window ROI size
patch_overlap = 0.3 # 0.5 for last validation submission
sw_bt_size=1  # sliding-window batch size
# debug new setting
# patch_size = (192, 192, 24)
# patch_overlap = 0.75 # 0.5 for last validation submission
# # # last validation submission weights
# net_weights = {
#     "ViolaUNet_l":{
#         "kf0": "./best_ckpt1/viola/model_epoch_8640_dice_0.80106_lr_0.0000869017.pt",
#         "kf1": "./best_ckpt1/viola/model_epoch_4656_dice_0.76015_lr_0.0019869438.pt", # new ft 0.76887
#         "kf2": "./best_ckpt1/viola/model_epoch_5940_dice_0.81959_lr_0.0000937899.pt", # new ft 0.82135
#         "kf3": "./best_ckpt1/viola/model_epoch_37728_dice_0.78699_lr_0.0033099166.pt", # new ft 0.78784
#         "kf4": "./best_ckpt1/viola/model_epoch_12054_dice_0.78984_lr_0.0000753008.pt",
#     },
#     "nnUNet":{
#         "kf0": "./best_ckpt1/nnu/model_epoch_19296_dice_0.80530_lr_0.0042245472.pt",
#         "kf1": "./best_ckpt1/nnu/model_epoch_38412_dice_0.76024_lr_0.0022887478.pt", # ft new 0.76780
#         "kf2": "./best_ckpt1/nnu/model_epoch_24651_dice_0.81655_lr_0.0037515006.pt", # ft new 0.81775
#         "kf3": "./best_ckpt1/nnu/model_epoch_1152_dice_0.79311_lr_0.0049999435.pt",
#         "kf4": "./best_ckpt1/nnu/model_epoch_5292_dice_0.78911_lr_0.0049550524.pt",
#     },
# }
# # re-fine-tuned version after the last validation submission weights
# Checkpoint path per network family and cross-validation fold ("kf0".."kf4").
net_weights = {
    "ViolaUNet_l":{
        "kf0": "./best_ckpt2/viola/model_epoch_8640_dice_0.80106_lr_0.0000869017.pt",
        "kf1": "./best_ckpt2/viola/model_epoch_5820_dice_0.76887_lr_0.0029100000.pt", # new ft 0.76887
        "kf2": "./best_ckpt2/viola/model_epoch_297_dice_0.82135_lr_0.0001485000.pt", # new ft 0.82135
        "kf3": "./best_ckpt2/viola/model_epoch_288_dice_0.78784_lr_0.0001440000.pt", # new ft 0.78784
        "kf4": "./best_ckpt2/viola/model_epoch_12054_dice_0.78984_lr_0.0000753008.pt",
    },
    "nnUNet":{
        "kf0": "./best_ckpt2/nnu/model_epoch_19296_dice_0.80530_lr_0.0042245472.pt",
        "kf1": "./best_ckpt2/nnu/model_epoch_6693_dice_0.76780_lr_0.0033465000.pt", # ft new 0.76780
        "kf2": "./best_ckpt2/nnu/model_epoch_18414_dice_0.81775_lr_0.0047762088.pt", # ft new 0.81775
        "kf3": "./best_ckpt2/nnu/model_epoch_1152_dice_0.79311_lr_0.0049999435.pt",
        "kf4": "./best_ckpt2/nnu/model_epoch_5292_dice_0.78911_lr_0.0049550524.pt",
    },
}
def load_model(network="ViolaUNet_l", kf="kf0", device='cpu', ckpt=True):
    """Build a segmentation network and optionally load its fold checkpoint.

    network: "ViolaUNet_l" or "nnUNet" (others print a message and return
    None); kf: fold key into ``net_weights``; ckpt=False leaves the model
    randomly initialised.  Both architectures share the same encoder/decoder
    layout (3 input channels, 2 output classes).  Returns the model on
    *device*.
    """
    if network == "ViolaUNet_l":
        model = ViolaUNet(
            spatial_dims=3,
            in_channels=3,
            out_channels=2,
            kernel_size=[[3, 3, 1], [3, 3, 1], [3, 3, 1], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
            strides=[[1, 1, 1], [2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
            upsample_kernel_size=[[2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
            filters=(32, 64, 96, 128, 192, 256, 320),
            dec_filters=(32, 64, 96, 128, 192, 256),
            norm_name=("BATCH", {"affine": True}),
            act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
            dropout=0.2,
            deep_supervision=True,
            deep_supr_num=4,
            res_block=True,
            trans_bias=True,
            viola_att = True,
            gated_att = False,
            sum_deep_supr = False,
        )
    elif network == 'nnUNet':
        model = DynUNet(
            spatial_dims=3,
            in_channels=3,
            out_channels=2,
            kernel_size=[[3, 3, 1], [3, 3, 1], [3, 3, 1], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
            strides=[[1, 1, 1], [2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
            upsample_kernel_size=[[2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
            filters=(32, 64, 96, 128, 192, 256, 320),
            dropout=0.2,
            norm_name=("INSTANCE", {"affine": True}),
            act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
            deep_supervision=True,
            deep_supr_num=4,
            res_block=True,
            trans_bias=True,
        )
    # elif network == 'ViolaUNet_s': # for paper figure 1
    #     model = ViolaUNet(
    #         spatial_dims=3,
    #         in_channels=3,
    #         out_channels=2,
    #         kernel_size=[[3, 3, 1], [3, 3, 1], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 1]],
    #         strides=[[1, 1, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 1, 1]],
    #         upsample_kernel_size=[[2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 1, 1]],
    #         filters=(32, 64, 96, 128, 192, 256, 320),
    #         dec_filters=(32, 64, 96, 128, 128, 128),
    #         norm_name=("BATCH", {"affine": True}),
    #         act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
    #         dropout=0.2,
    #         deep_supervision=True,
    #         deep_supr_num=2,
    #         res_block=False,
    #         trans_bias=True,
    #         viola_att = True,
    #         gated_att = False,
    #         sum_deep_supr = False,
    #     )
    else:
        print("Not support the network currently - ", network)
        return None
    # ViolaUNet_s (disabled branch above) has no checkpoint in net_weights,
    # hence the extra guard.
    if ckpt and network != 'ViolaUNet_s': #
        pretrain = torch.load(net_weights[network][kf], map_location=device)
        model.load_state_dict(pretrain['state_dict'])
        print("model {}-{} loaded successfully!".format(network, kf))
    return model.to(device)
def infer_seg(images, model,
              roi_size=patch_size, sw_batch_size=sw_bt_size, overlap=patch_overlap,
              flip_axis=-1, rot=0):
    """Sliding-window inference with at most one test-time transform.

    If rot is in {1, 2, 3}: rotate the first two spatial axes by rot*90 deg
    before inference and rotate the prediction back.  Else if flip_axis is
    in {0, 1, 2}: flip that spatial axis before inference and un-flip the
    output.  Otherwise: plain sliding-window inference.
    """
    if rot>0 and rot<4:
        val_outputs = sliding_window_inference(
            torch.stack([torch.rot90(k, rot, map_spatial_axes(k.ndim, (0, 1))) for k in decollate_batch(images)]),
            roi_size, sw_batch_size, model, overlap=overlap)
        # 4-rot additional quarter-turns undo the input rotation.
        val_outputs = torch.stack([torch.rot90(k, 4-rot, map_spatial_axes(k.ndim, (0, 1))) for k in decollate_batch(val_outputs)])
    elif flip_axis>=0 and flip_axis<3 :
        val_outputs = sliding_window_inference(
            torch.stack([torch.flip(k, map_spatial_axes(k.ndim, flip_axis)) for k in decollate_batch(images)]),
            roi_size, sw_batch_size, model, overlap=overlap)
        # Flipping is its own inverse.
        val_outputs = torch.stack([torch.flip(k, map_spatial_axes(k.ndim, flip_axis)) for k in decollate_batch(val_outputs)])
    else:
        val_outputs = sliding_window_inference(
            images, roi_size, sw_batch_size, model, overlap=overlap)
    return val_outputs
def infer_seg_3(images, model, roi_size=patch_size, sw_batch_size=1, overlap=patch_overlap,
                flip_axis=[0, 1, 2], rot=-1):
    """Sliding-window inference under a composite flip(+rotation) TTA.

    Flips every spatial axis listed in *flip_axis*, optionally rotates the
    first two spatial axes by rot*90 deg (rot in {1, 2, 3}), infers, then
    applies the inverse transforms to the output in reverse order.
    NOTE: the mutable default flip_axis=[0, 1, 2] is shared across calls;
    this is safe only because the list is never mutated here.
    """
    for axis in flip_axis:
        images = torch.stack([torch.flip(k, map_spatial_axes(k.ndim, axis)) for k in decollate_batch(images)])
    if rot > 0 and rot < 4:
        images = torch.stack([torch.rot90(k, rot, map_spatial_axes(k.ndim, (0, 1))) for k in decollate_batch(images)])
    val_outputs = sliding_window_inference(images, roi_size, sw_batch_size, model, overlap=overlap)
    # Undo the rotation first, then each flip (flips are self-inverse).
    if rot > 0 and rot < 4:
        val_outputs = torch.stack(
            [torch.rot90(k, 4 - rot, map_spatial_axes(k.ndim, (0, 1))) for k in decollate_batch(val_outputs)])
    for axis in flip_axis:
        val_outputs = torch.stack([torch.flip(k, map_spatial_axes(k.ndim, axis)) for k in decollate_batch(val_outputs)])
    return val_outputs
def nibout(segmentation, outputpath, imagepath):
    """Save *segmentation* (a numpy array) as a NIfTI file.

    The output volume reuses the affine, qform and sform of the source
    image at *imagepath* and is written into *outputpath* under the same
    filename as the source image.
    """
    _, filename = os.path.split(imagepath)
    print(filename)
    source = nib.load(imagepath)
    seg_img = nib.Nifti1Image(segmentation, source.affine)
    # Copy both coordinate-system headers so downstream viewers align the
    # mask with the original scan.
    seg_img.set_qform(source.get_qform())
    seg_img.set_sform(source.get_sform())
    nib.save(seg_img, os.path.join(outputpath, filename))
# import time
if __name__ == '__main__':
    # Smoke test: load one Viola-UNet model and report its parameter count.
    # _, channel, _, _, _ = input.shape
    # check model load and param size
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    model = load_model(network="ViolaUNet_l", device=device).eval()
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    print(f'Trainable params: {sum([p.numel() for p in trainable_params])/1000**2.} M')
    # for kf in ["kf0", 'kf1', 'kf2', 'kf3', 'kf4']:
    #     model = load_model(network="ViolaUNet_s", kf=kf, device=device).eval()
    # output = model(input)
    # print(output.size())
    # --- test inference speed -----------------------------
    # input = torch.randn(1, 3, 512, 512, 32).cuda()
    # input = torch.autograd.Variable(torch.sigmoid(torch.randn(1, 3, 512, 512, 32)), requires_grad=False).cuda()
    # output = model(input)
    # start_time = time.time()
    # output = infer_seg(input, model)
    # print('--------Cost time: {:.3f} sec --------'.format(time.time() - start_time))
| 9,563 | 43.691589 | 130 | py |
Viola-Unet | Viola-Unet-main/load_data.py | # load data and pre-post precess
import os
from glob import glob
from load_model import load_model, wind_levels, spacing
import numpy as np
from monai.transforms import *
from monai.data import Dataset, DataLoader
# Pre-processing: load the volume, resample to the model's voxel spacing,
# duplicate it, window each copy with a different CT intensity range and
# stack the three windows as input channels, then orient/crop/normalise.
pre_process = Compose(
    [
        LoadImaged(keys=["image"]),
        AddChanneld(keys=["image"]),
        Spacingd(keys=["image"], pixdim=spacing, mode=("bilinear")),
        CopyItemsd(keys=["image"], times=2, names=["img_2", "img_3"]),
        ScaleIntensityRanged(
            keys=["image"], a_min=wind_levels[0][0], a_max=wind_levels[0][1],
            b_min=0.0, b_max=1.0, clip=True,
        ),
        ScaleIntensityRanged(
            keys=["img_2"], a_min=wind_levels[1][0], a_max=wind_levels[1][1],
            b_min=0.0, b_max=1.0, clip=True,
        ),
        ScaleIntensityRanged(
            keys=["img_3"], a_min=wind_levels[2][0], a_max=wind_levels[2][1],
            b_min=0.0, b_max=1.0, clip=True,
        ),
        ConcatItemsd(['image', 'img_2', 'img_3'], name='image'),
        DeleteItemsd(['img_2', 'img_3']),
        Orientationd(keys=["image"], axcodes="RAS"), # RPI, RAS # error using orientationd for case 8 and 3
        CropForegroundd(keys=["image"], source_key="image"),
        NormalizeIntensityd(keys=["image"], nonzero=True, channel_wise=True), # only for Viola_xds_ model
        # SpatialPadd(keys=["image"], spatial_size=patch_size),
        CastToTyped(keys=["image"], dtype=(np.float32)),
        # EnsureTyped(keys=["image", "label"]),
        ToTensord(keys=["image"]),
    ]
)
# Post-processing: softmax the logits, then invert pre_process so the
# prediction lands back on the raw image's grid.
post_process = Compose([
    EnsureTyped(keys="pred"),
    Activationsd(keys="pred", softmax=True),
    # KeepLargestConnectedComponent(applied_labels=1),
    Invertd(
        keys="pred",
        transform=pre_process,  # inference_transforms, test_transforms_3c
        orig_keys="image",
        meta_keys="pred_meta_dict",
        orig_meta_keys="image_meta_dict",
        meta_key_postfix="meta_dict",
        nearest_interp=False,
        to_tensor=True,
    ),
])
# reading raw head information from input scans
# (no resampling — used to get original pixdims for volume reporting)
read_raw_image = Compose(
    [
        LoadImaged(keys=["image"]),
        AddChanneld(keys=["image"]),
        ScaleIntensityRanged(keys=["image"],
                             a_min=0, a_max=100,
                             b_min=0., b_max=1., clip=True),
        EnsureTyped(keys=["image"])
    ]
)
def load_data(input_folder=''):
    """Build a dataset/dataloader over every NIfTI volume in *input_folder*.

    Returns (file_list, dataloader): file_list is a sorted list of
    {'image': path} dicts; the dataloader applies ``pre_process`` to each.
    """
    nii_paths = sorted(glob(os.path.join(input_folder, "*.nii*")))
    file_dicts = [{'image': p} for p in nii_paths]
    dataset = Dataset(data=file_dicts, transform=pre_process)
    # batch_size=1 / num_workers=0 keeps inference sequential (macOS-safe).
    loader = DataLoader(dataset, batch_size=1, num_workers=0)
    return file_dicts, loader
Viola-Unet | Viola-Unet-main/viola_unet.py | # ViolaUNet is based on DynUNet
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import interpolate
from monai.networks.blocks.convolutions import Convolution
from monai.networks.layers.factories import Norm
from monai.networks.blocks.dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, get_conv_layer
def initialize_weights(*models):
    """Initialise every module in *models* in place.

    Conv/transposed-conv/linear layers get Kaiming-normal weights (matching
    the network's LeakyReLU slope 0.01); batch-norm layers get N(1, 0.02)
    scale; all biases are zeroed.
    """
    for model in models:
        for module in model.modules():
            if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose3d, nn.ConvTranspose2d, nn.Linear)):
                nn.init.kaiming_normal_(module.weight, a=0.01, mode='fan_in', nonlinearity='leaky_relu')
                # nn.init.xavier_normal_(module.weight, gain=1.)
                # module.weight = nn.init.orthogonal_(module.weight, gain=1.)
                if module.bias is not None:
                    module.bias.data.zero_()
                    # module.bias = nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.BatchNorm3d, nn.BatchNorm2d)):
                # module.weight.data.fill_(1)
                nn.init.normal_(module.weight.data, 1.0, 0.02)
                if module.bias is not None:
                    module.bias.data.zero_()
            # NOTE(review): this branch is unreachable — nn.Linear is already
            # matched by the first isinstance() above, so Linear layers get
            # Kaiming init, never this N(1, 0.001) init.
            elif isinstance(module, (nn.Linear)):
                nn.init.normal_(module.weight.data, 1.0, 0.001)
                if module.bias is not None:
                    module.bias.data.zero_()
class group_norm(nn.GroupNorm):
    """GroupNorm for inputs shaped (b, 1, c, L) or (b, 1, L, c).

    The singleton dim 1 is squeezed away before normalisation and restored
    afterwards.  With ``permute=True`` the last two axes are swapped so the
    channel axis lands in dim 1, as nn.GroupNorm expects.
    """

    def __init__(self, num_groups, num_channels, permute=False):
        super(group_norm, self).__init__(num_groups, num_channels)
        self.perm = permute

    def forward(self, x):
        squeezed = x.squeeze(1)
        if self.perm:
            normed = super(group_norm, self).forward(squeezed.permute(0, 2, 1))
            normed = normed.permute(0, 2, 1)
        else:
            normed = super(group_norm, self).forward(squeezed)
        return normed.unsqueeze(1)
class LayerNorm(nn.Module):
    """Scale-only layer norm across the channel axis of a 5-D tensor.

    Normalises each voxel's channel vector to zero mean / unit variance and
    multiplies by a learnable per-channel gain (no bias term).
    """

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1, 1))

    def forward(self, x):
        mu = x.mean(dim=1, keepdim=True)
        sigma2 = x.var(dim=1, unbiased=False, keepdim=True)
        return (x - mu) / (sigma2 + self.eps).sqrt() * self.gamma
def l2norm(t):
    """L2-normalise *t* along its last dimension."""
    return F.normalize(t, p=2.0, dim=-1)
class ddcmBlock_silu(nn.Module):
    """Densely-chained dilated conv block with SiLU + 1x1 fusion.

    Each stage concatenates the input with every previous stage's
    (upsampled) output before convolving, DenseNet-style; a final SiLU +
    1x1 conv fuses everything back to ``in_dim`` channels.  The (kernel, 1)
    kernels convolve along the H axis only; in this codebase the callers
    (viola_attx_ddcm_dyk) feed axis-pooled maps reshaped to (b, 1, C, L),
    so that axis is the channel profile of one spatial direction.
    """
    def __init__(self, in_dim, out_dim, rates, strides=None, kernel=3, bias=False, dropout=0.1):
        super(ddcmBlock_silu, self).__init__()
        self.features = []
        self.num = len(rates)
        self.in_dim = in_dim
        self.out_dim = out_dim
        if strides is None:
            self.strides = [1 for i in range(self.num)]
        else:
            self.strides = strides
        for idx, rate in enumerate(rates):
            # Stage idx sees in_dim + idx*out_dim channels (dense inputs);
            # padding keeps the H extent aligned with the dilation rate.
            self.features.append(nn.Sequential(
                nn.Conv2d(self.in_dim + idx * out_dim,
                          out_dim,
                          kernel_size=(kernel, 1),
                          dilation=rate,
                          stride=(self.strides[idx],1),
                          padding=(rate * (kernel - 1) // 2, 0),
                          bias=bias),
                nn.Dropout(p=dropout)
            )
            )
        self.features = nn.ModuleList(self.features)
        # Fuses original input + all stage outputs back to in_dim channels.
        self.conv1x1_out = nn.Sequential(
            nn.SiLU(inplace=True),
            nn.Conv2d(self.in_dim*2 + out_dim * self.num, self.in_dim, kernel_size=1, bias=False),
        )
        initialize_weights(self.conv1x1_out, self.features)
    def forward(self, x):
        b,*_ = x.size()
        # Drop all singleton dims (e.g. a pooled (b, c, h, 1, 1) input),
        # restoring the batch/trailing dims that squeeze may have removed.
        x = torch.squeeze(x)
        if b==1:
            x = x.unsqueeze(0)
        if x.ndim == 2:
            x = x.unsqueeze(-1)
        # Treat the remaining (C, L) map as a 1-channel 2-D image.
        x = x.unsqueeze(1)
        _, _, H, W = x.size()
        xc = x.clone()
        for f in self.features:
            # Strided stages shrink H; interpolate back so outputs can be
            # concatenated with the running input.
            x = torch.cat([F.interpolate(f(x), (H, W), mode='bilinear', align_corners=False), x], 1)
        x = self.conv1x1_out(torch.cat([xc, x], 1))
        return x
class viola_attx_ddcm_dyk(nn.Module):
    """Voxel-wise axis attention built from three 1-D pooled profiles.

    The input is average-pooled along each spatial axis, each profile is
    refined by a ddcmBlock_silu with a channel-dependent ("dynamic") kernel
    size, and the resulting per-axis gates are recombined — pairwise
    products, triple product and a rectified sum — into a multiplicative
    attention map applied to the input.
    """
    def __init__(self, channel, reduction=16, min_dim=4, k_size=3):
        super(viola_attx_ddcm_dyk, self).__init__()
        # Keep one spatial axis each: (b,c,h,1,1), (b,c,1,w,1), (b,c,1,1,d).
        self.x_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        self.y_pool = nn.AdaptiveAvgPool3d((1, None, 1))
        self.z_pool = nn.AdaptiveAvgPool3d((1, 1, None))
        self.sig = nn.Sigmoid()
        # Joint normalisation (2 groups) + tanh over the concatenated profiles.
        self.act = nn.Sequential(
            group_norm(2, channel),
            nn.Tanh(),
        )
        self.relu = nn.ReLU(inplace=False)
        d = max(channel // reduction, min_dim)
        # Kernel size grows with the channel count; dilation rates and
        # strides give the DDCM chain an expanding receptive field.
        ck_size = k_size + 2*(channel//32)
        ration = [1, ck_size, 2 * (ck_size - 1) + 1, 3 * (ck_size - 1) + 1]
        strides = [2, 2, 4, 4]
        ext_ch = d // 4 + 1
        self.xconv = nn.Sequential(
            LayerNorm(channel),
            ddcmBlock_silu(1, ext_ch, ration, strides=strides, kernel=ck_size, bias=False)
        )
        self.yconv = nn.Sequential(
            LayerNorm(channel),
            ddcmBlock_silu(1, ext_ch, ration, strides=strides, kernel=ck_size, bias=False)
        )
        self.zconv = nn.Sequential(
            LayerNorm(channel),
            ddcmBlock_silu(1, ext_ch, ration, strides=strides, kernel=ck_size, bias=False)
        )
        initialize_weights(self.xconv, self.yconv, self.zconv, self.act)
    def forward(self, x):
        b, c, h, w, d = x.size()
        # Per-axis gate logits, each shaped (b, 1, c, axis_len).
        vx = self.xconv(self.x_pool(x))
        vy = self.yconv(self.y_pool(x))
        vz = self.zconv(self.z_pool(x))
        xs = self.sig(vx)
        ys = self.sig(vy)
        zs = self.sig(vz)
        # Jointly normalise the raw logits, then average each slice with the
        # corresponding sigmoid gate.
        vxyz = self.act(torch.cat((vx, vy, vz), 3)) # b, 1, c, h+w+d
        xt = 0.5 * (vxyz[:, :, :, 0:h] + xs)
        yt = 0.5 * (vxyz[:, :, :, h:h+w] + ys)
        zt = 0.5 * (vxyz[:, :, :, h+w:h+w+d] + zs)
        # Broadcast every gate back to one spatial axis of the 5-D volume.
        xs = xs.view(b, c, h, 1, 1)
        ys = ys.view(b, c, 1, w, 1)
        zs = zs.view(b, c, 1, 1, d)
        xt = xt.view(b, c, h, 1, 1)
        yt = yt.view(b, c, 1, w, 1)
        zt = zt.view(b, c, 1, 1, d)
        viola_j = xs * ys + ys*zs + zs*xs # pairwise products, range 0-3
        viola_m = xs * ys * zs # triple product, range 0-1
        viola_a = self.relu(xt + yt + zt) # rectified sum, range 0-3
        viola = viola_j + viola_m + viola_a
        # Rescale toward ~1 and add an L2-normalised copy of the map itself.
        viola = 0.1 * viola + 0.3
        viola = viola + l2norm(viola.contiguous().view(b,-1)).view(b,c,h,w,d)
        return x * viola
class GatedAttentionBlock(nn.Module):
    """Additive attention gate over skip-connection features.

    The gating signal *g* (f_g channels) and the skip features *x*
    (f_l channels) are each projected to f_int channels with 1x1(x1) convs +
    batch norm, summed, ReLU'd and squashed to a single-channel sigmoid map
    ``psi`` that multiplicatively gates *x*.
    """
    def __init__(self, spatial_dims: int, f_int: int, f_g: int, f_l: int, dropout=0.0):
        super().__init__()
        # Projection of the gating signal.
        self.W_g = nn.Sequential(
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=f_g,
                out_channels=f_int,
                kernel_size=1,
                strides=1,
                padding=0,
                dropout=dropout,
                conv_only=True,
            ),
            Norm[Norm.BATCH, spatial_dims](f_int),
        )
        # Projection of the skip features.
        self.W_x = nn.Sequential(
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=f_l,
                out_channels=f_int,
                kernel_size=1,
                strides=1,
                padding=0,
                dropout=dropout,
                conv_only=True,
            ),
            Norm[Norm.BATCH, spatial_dims](f_int),
        )
        # Attention coefficients: f_int -> 1 channel, squashed to (0, 1).
        self.psi = nn.Sequential(
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=f_int,
                out_channels=1,
                kernel_size=1,
                strides=1,
                padding=0,
                dropout=dropout,
                conv_only=True,
            ),
            Norm[Norm.BATCH, spatial_dims](1),
            nn.Sigmoid(),
        )
        self.relu = nn.ReLU()
    def forward(self, g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        """Return *x* weighted by the attention map derived from (g, x)."""
        g1 = self.W_g(g)
        x1 = self.W_x(x)
        psi: torch.Tensor = self.relu(g1 + x1)
        psi = self.psi(psi)
        return x * psi
class DynUNetSkipLayer(nn.Module):
    """One level of the UNet topology: downsample, recurse into the deeper
    level, then upsample together with the skip connection.

    ``next_layer`` is either another ``DynUNetSkipLayer`` or the bottleneck
    module, so the whole network is expressed as a recursive chain of modules.
    This sidesteps the Torchscript restrictions on looping over module lists
    and indexing accumulated outputs.  ``heads`` is a single list shared by
    every instance; levels carrying a supervision head write their head output
    into ``heads[index - 1]`` as a side effect of the forward pass.
    """

    heads: Optional[List[torch.Tensor]]

    def __init__(self, index, downsample, upsample, next_layer, heads=None, super_head=None):
        super().__init__()
        self.downsample = downsample
        self.next_layer = next_layer
        self.upsample = upsample
        self.super_head = super_head
        self.heads = heads
        self.index = index

    def forward(self, x):
        skip = self.downsample(x)
        deeper = self.next_layer(skip)
        merged = self.upsample(deeper, skip)
        # The input block (index 0) never carries a supervision head.
        if self.super_head is not None and self.heads is not None and self.index > 0:
            self.heads[self.index - 1] = self.super_head(merged)
        return merged
class UnetUpBlock_x_ddcm(nn.Module):
    """
    An upsampling module that can be used for DynUNet, based on:
    `Automated Design of Deep Learning Methods for Biomedical Image Segmentation <https://arxiv.org/abs/1904.08128>`_.
    `nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation <https://arxiv.org/abs/1809.10486>`_.

    Pipeline: transposed convolution on ``inp``, concatenation with the
    (optionally gated) skip features, optional Viola channel attention on the
    concatenation, then a basic conv block.

    Args:
        spatial_dims: number of spatial dimensions.
        in_channels: number of input channels.
        out_channels: number of output channels.
        skip_channels: number of channels in the skip-connection feature map.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        upsample_kernel_size: convolution kernel size for transposed convolution layers.
        norm_name: feature normalization type and arguments.
        act_name: activation layer type and arguments.
        dropout: dropout probability.
        trans_bias: transposed convolution bias.
        my_att: apply the Viola attention module to the concatenated features.
        skip_att: gate the skip connection with a ``GatedAttentionBlock``.
    """
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        skip_channels: int,
        kernel_size: Union[Sequence[int], int],
        stride: Union[Sequence[int], int],
        upsample_kernel_size: Union[Sequence[int], int],
        norm_name: Union[Tuple, str],
        act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
        dropout: Optional[Union[Tuple, str, float]] = None,
        trans_bias: bool = False,
        my_att: bool = True,
        skip_att: bool = False,
    ):
        super().__init__()
        self.myatt = my_att
        self.skipatt = skip_att
        upsample_stride = upsample_kernel_size
        if skip_att:
            # Gated attention filters the skip features, using the coarser
            # decoder input as the gating signal.
            self.attention = GatedAttentionBlock(
                spatial_dims=spatial_dims,
                f_g=in_channels,
                f_l=skip_channels,
                f_int=in_channels // 2
            )
        else: self.attention = None
        # Learned upsampling: stride equals the transposed kernel size.
        self.transp_conv = get_conv_layer(
            spatial_dims,
            in_channels,
            out_channels,
            kernel_size=upsample_kernel_size,
            stride=upsample_stride,
            dropout=dropout,
            bias=trans_bias,
            conv_only=True,
            is_transposed=True,
        )
        if my_att:
            # Viola channel attention over the concatenated features.
            self.canc_att = viola_attx_ddcm_dyk(
                channel=out_channels + skip_channels,
                reduction=16, min_dim=4, k_size=3
            )
        else:
            self.canc_att = None
        self.conv_block = UnetBasicBlock(
            spatial_dims,
            out_channels + skip_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=1,
            dropout=dropout,
            norm_name=norm_name,
            act_name=act_name,
        )
    def forward(self, inp, skip):
        # number of channels for skip should equals to out_channels
        out = self.transp_conv(inp)
        if self.skipatt:
            # Resize the decoder input to the skip's spatial size so it can
            # act as the gating signal ('trilinear' implies 3D inputs here).
            att = self.attention(
                F.interpolate(inp, skip.size()[2:], mode='trilinear', align_corners=False),
                skip
            )
            out = torch.cat((att, out), dim=1)
        else:
            out = torch.cat((skip, out), dim=1)
        if self.myatt:
            out = self.canc_att(out)
        out = self.conv_block(out)
        return out
class ViolaUNet(nn.Module):
    """
    This reimplementation of ViolaUNet is based on dynamic UNet of Monai:
    This model is more flexible compared with ``monai.networks.nets.UNet`` in three
    places:
        - Residual connection is supported in conv blocks.
        - Anisotropic kernel sizes and strides can be used in each layers.
        - Deep supervision heads can be added and sumup during inference.
        - Encoder and Decoder can have insymetric filter numbers
        - Support both Viola attention and gated attention methods.
    The model supports 2D or 3D inputs and is consisted with four kinds of blocks:
    one input block, `n` downsample blocks, one bottleneck and `n+1` upsample blocks. Where, `n>0`.
    The first and last kernel and stride values of the input sequences are used for input block and
    bottleneck respectively, and the rest value(s) are used for downsample and upsample blocks.
    Therefore, pleasure ensure that the length of input sequences (``kernel_size`` and ``strides``)
    is no less than 3 in order to have at least one downsample and upsample blocks.
    To meet the requirements of the structure, the input size for each spatial dimension should be divisible
    by the product of all strides in the corresponding dimension. In addition, the minimal spatial size should have
    at least one dimension that has twice the size of the product of all strides.
    For example, if `strides=((1, 2, 4), 2, 2, 1)`, the spatial size should be divisible by `(4, 8, 16)`,
    and the minimal spatial size is `(8, 8, 16)` or `(4, 16, 16)` or `(4, 8, 32)`.
    The output size for each spatial dimension equals to the input size of the corresponding dimension divided by the
    stride in strides[0].
    For example, if `strides=((1, 2, 4), 2, 2, 1)` and the input size is `(64, 32, 32)`, the output size is `(64, 16, 8)`.
    For backwards compatibility with old weights, please set `strict=False` when calling `load_state_dict`.
    Usage example with medical segmentation decathlon dataset is available at:
    https://github.com/Project-MONAI/tutorials/tree/master/modules/dynunet_pipeline.
    Args:
        spatial_dims: number of spatial dimensions.
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: convolution kernel size.
        strides: convolution strides for each blocks.
        upsample_kernel_size: convolution kernel size for transposed convolution layers. The values should
            equal to strides[1:].
        filters: number of output channels for each encoder blocks. Different from nnU-Net, in this implementation we add
            this argument to make the network more flexible. As shown in the third reference, one way to determine
            this argument is like:
            ``[64, 96, 128, 192, 256, 384, 512, 768, 1024][: len(strides)]``.
            The above way is used in the network that wins task 1 in the BraTS21 Challenge.
            If not specified, the way which nnUNet used will be employed. Defaults to ``None``.
        dec_filters: number of output channels for each decoder blocks.
            If not specified, the way which nnUNet used will be employed. Defaults to ``None``.
        dropout: dropout ratio. Defaults to no dropout.
        norm_name: feature normalization type and arguments. Defaults to ``INSTANCE``.
            `INSTANCE_NVFUSER` is a faster version of the instance norm layer, it can be used when:
            1) `spatial_dims=3`, 2) CUDA device is available, 3) `apex` is installed and 4) non-Windows OS is used.
        act_name: activation layer type and arguments. Defaults to ``leakyrelu``.
        deep_supervision: whether to add deep supervision head before output. Defaults to ``False``.
            If ``True``, in training mode, the forward function will output not only the final feature map
            (from `output_block`), but also the feature maps that come from the intermediate up sample layers.
            In order to unify the return type (the restriction of TorchScript), all intermediate
            feature maps are interpolated into the same size as the final feature map and stacked together
            (with a new dimension in the first axis)into one single tensor.
            For instance, if there are two intermediate feature maps with shapes: (1, 2, 16, 12) and
            (1, 2, 8, 6), and the final feature map has the shape (1, 2, 32, 24), then all intermediate feature maps
            will be interpolated into (1, 2, 32, 24), and the stacked tensor will has the shape (1, 3, 2, 32, 24).
            When calculating the loss, you can use torch.unbind to get all feature maps can compute the loss
            one by one with the ground truth, then do a weighted average for all losses to achieve the final loss.
        deep_supr_num: number of feature maps that will output during deep supervision head. The
            value should be larger than 0 and less than the number of up sample layers.
            Defaults to 1.
        res_block: whether to use residual connection based convolution blocks during the network.
            Defaults to ``False``.
        trans_bias: whether to set the bias parameter in transposed convolution layers. Defaults to ``False``.
        viola_att: whether to use viola attention module during the network. Defaults to ``True``.
        gated_att: whether to use gated attention module during the network. Defaults to ``False``.
        sum_deep_supr: whether to sum up all output (deep supervision) during inference. Defaults to ``False``.
    """
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernel_size: Sequence[Union[Sequence[int], int]],
        strides: Sequence[Union[Sequence[int], int]],
        upsample_kernel_size: Sequence[Union[Sequence[int], int]],
        filters: Optional[Sequence[int]] = None,  # up to bottom
        dec_filters: Optional[Sequence[int]] = None,  # bottom to up
        dropout: Optional[Union[Tuple, str, float]] = None,
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
        act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
        deep_supervision: bool = False,
        deep_supr_num: int = 1,
        res_block: bool = False,
        trans_bias: bool = False,
        viola_att: bool = True,
        gated_att: bool = False,
        sum_deep_supr: bool = False,
    ):
        super().__init__()
        self.spatial_dims = spatial_dims
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.strides = strides
        self.upsample_kernel_size = upsample_kernel_size
        self.norm_name = norm_name
        self.act_name = act_name
        self.dropout = dropout
        self.conv_block = UnetResBlock if res_block else UnetBasicBlock
        self.trans_bias = trans_bias
        self.my_att = viola_att
        self.skip_att = gated_att
        self.sum_deep_supr = sum_deep_supr
        if filters is not None:
            self.filters = filters
            self.check_filters()
        else:
            # nnU-Net default: 32, 64, 128, ... capped at 320 (3D) / 512 (2D).
            self.filters = [min(2 ** (5 + i), 320 if spatial_dims == 3 else 512) for i in range(len(strides))]
        if dec_filters is None:
            # Symmetric decoder unless explicit decoder widths are given.
            self.dec_filters = self.filters
        else:
            self.dec_filters = dec_filters
        self.input_block = self.get_input_block()
        self.downsamples = self.get_downsamples()
        self.bottleneck = self.get_bottleneck()
        self.upsamples = self.get_upsamples()
        self.output_block = self.get_output_block(0)
        self.deep_supervision = deep_supervision
        self.deep_supr_num = deep_supr_num
        # initialize the typed list of supervision head outputs so that Torchscript can recognize what's going on
        self.heads: List[torch.Tensor] = [torch.rand(1)] * self.deep_supr_num
        if self.deep_supervision:
            self.deep_supervision_heads = self.get_deep_supervision_heads()
            self.check_deep_supr_num()
        self.apply(self.initialize_weights)
        self.check_kernel_stride()
        def create_skips(index, downsamples, upsamples, bottleneck, superheads=None):
            """
            Construct the UNet topology as a sequence of skip layers terminating with the bottleneck layer. This is
            done recursively from the top down since a recursive nn.Module subclass is being used to be compatible
            with Torchscript. Initially the length of `downsamples` will be one more than that of `superheads`
            since the `input_block` is passed to this function as the first item in `downsamples`, however this
            shouldn't be associated with a supervision head.
            """
            if len(downsamples) != len(upsamples):
                raise ValueError(f"{len(downsamples)} != {len(upsamples)}")
            if len(downsamples) == 0:  # bottom of the network, pass the bottleneck block
                return bottleneck
            if superheads is None:
                next_layer = create_skips(1 + index, downsamples[1:], upsamples[1:], bottleneck)
                return DynUNetSkipLayer(index, downsample=downsamples[0], upsample=upsamples[0], next_layer=next_layer)
            super_head_flag = False
            if index == 0:  # don't associate a supervision head with self.input_block
                rest_heads = superheads
            else:
                if len(superheads) > 0:
                    super_head_flag = True
                    rest_heads = superheads[1:]
                else:
                    rest_heads = nn.ModuleList()
            # create the next layer down, this will stop at the bottleneck layer
            next_layer = create_skips(1 + index, downsamples[1:], upsamples[1:], bottleneck, superheads=rest_heads)
            if super_head_flag:
                return DynUNetSkipLayer(
                    index,
                    downsample=downsamples[0],
                    upsample=upsamples[0],
                    next_layer=next_layer,
                    heads=self.heads,
                    super_head=superheads[0],
                )
            return DynUNetSkipLayer(index, downsample=downsamples[0], upsample=upsamples[0], next_layer=next_layer)
        # Upsamples are reversed so the deepest upsample pairs with the
        # deepest downsample at each recursion level.
        if not self.deep_supervision:
            self.skip_layers = create_skips(
                0, [self.input_block] + list(self.downsamples), self.upsamples[::-1], self.bottleneck
            )
        else:
            self.skip_layers = create_skips(
                0,
                [self.input_block] + list(self.downsamples),
                self.upsamples[::-1],
                self.bottleneck,
                superheads=self.deep_supervision_heads,
            )
    def check_kernel_stride(self):
        # Validate that kernel/stride sequences agree in length (>= 3) and
        # that any per-block sequence matches spatial_dims.
        kernels, strides = self.kernel_size, self.strides
        error_msg = "length of kernel_size and strides should be the same, and no less than 3."
        if len(kernels) != len(strides) or len(kernels) < 3:
            raise ValueError(error_msg)
        for idx, k_i in enumerate(kernels):
            kernel, stride = k_i, strides[idx]
            if not isinstance(kernel, int):
                error_msg = f"length of kernel_size in block {idx} should be the same as spatial_dims."
                if len(kernel) != self.spatial_dims:
                    raise ValueError(error_msg)
            if not isinstance(stride, int):
                error_msg = f"length of stride in block {idx} should be the same as spatial_dims."
                if len(stride) != self.spatial_dims:
                    raise ValueError(error_msg)
    def check_deep_supr_num(self):
        # deep_supr_num must lie in [1, number of upsample layers - 1].
        deep_supr_num, strides = self.deep_supr_num, self.strides
        num_up_layers = len(strides) - 1
        if deep_supr_num >= num_up_layers:
            raise ValueError("deep_supr_num should be less than the number of up sample layers.")
        if deep_supr_num < 1:
            raise ValueError("deep_supr_num should be larger than 0.")
    def check_filters(self):
        # Truncate an over-long filters list to one entry per stride.
        filters = self.filters
        if len(filters) < len(self.strides):
            raise ValueError("length of filters should be no less than the length of strides.")
        else:
            self.filters = filters[: len(self.strides)]
    def forward(self, x):
        out = self.skip_layers(x)
        out = self.output_block(out)
        if self.training and self.deep_supervision:
            # Training: stack the final map with all (resized) supervision
            # heads along a new dim so TorchScript sees a single tensor.
            out_all = [out]
            for feature_map in self.heads:
                out_all.append(interpolate(feature_map, out.shape[2:]))
            return torch.stack(out_all, dim=1)
        elif self.deep_supervision and self.sum_deep_supr:
            # Inference: sum softmaxed head outputs with geometrically
            # decaying weights (0.5, 0.25, ...).
            out = F.softmax(out, 1)
            for i, feature_map in enumerate(self.heads):
                out_ds = F.softmax(interpolate(feature_map, out.shape[2:]), 1)
                out_ds = 0.5**(i+1) * out_ds
                out = out + out_ds
            return out
        # NOTE(review): when neither branch applies (e.g. plain inference with
        # deep_supervision=False), no value is returned here and the call
        # yields None -- confirm whether a trailing ``return out`` at function
        # level was lost.
    def get_input_block(self):
        # First encoder block: in_channels -> filters[0] at strides[0].
        return self.conv_block(
            self.spatial_dims,
            self.in_channels,
            self.filters[0],
            self.kernel_size[0],
            self.strides[0],
            self.norm_name,
            self.act_name,
            dropout=self.dropout,
        )
    def get_bottleneck(self):
        # Deepest block, uses the last kernel/stride entries.
        return self.conv_block(
            self.spatial_dims,
            self.filters[-2],
            self.filters[-1],
            self.kernel_size[-1],
            self.strides[-1],
            self.norm_name,
            self.act_name,
            dropout=self.dropout,
        )
    def get_output_block(self, idx: int):
        # 1x1 projection from decoder width idx to the segmentation classes.
        return UnetOutBlock(self.spatial_dims, self.dec_filters[idx], self.out_channels, dropout=self.dropout)
    def get_downsamples(self):
        # Interior encoder blocks (input block and bottleneck excluded).
        inp, out = self.filters[:-2], self.filters[1:-1]
        strides, kernel_size = self.strides[1:-1], self.kernel_size[1:-1]
        return self.get_module_list(inp, out, out, kernel_size, strides, self.conv_block)
    def get_upsamples(self):
        # Decoder channels run bottom-to-top: input widths start at the
        # bottleneck width, skips are the reversed encoder widths.
        inp, out = (self.filters[-1], *self.dec_filters[::-1]), self.dec_filters[::-1]
        skip_c = self.filters[:-1][::-1]
        strides, kernel_size = self.strides[1:][::-1], self.kernel_size[1:][::-1]
        upsample_kernel_size = self.upsample_kernel_size[::-1]
        return self.get_module_list(
            inp, out, skip_c, kernel_size, strides, UnetUpBlock_x_ddcm, upsample_kernel_size, trans_bias=self.trans_bias
        )
    def get_module_list(
        self,
        in_channels: List[int],
        out_channels: List[int],
        skip_channels: List[int],
        kernel_size: Sequence[Union[Sequence[int], int]],
        strides: Sequence[Union[Sequence[int], int]],
        conv_block: nn.Module,
        upsample_kernel_size: Optional[Sequence[Union[Sequence[int], int]]] = None,
        trans_bias: bool = False,
    ):
        # Build one block per zipped channel/kernel/stride tuple; the
        # presence of upsample_kernel_size selects the decoder signature.
        layers = []
        if upsample_kernel_size is not None:
            for in_c, out_c, skip_c, kernel, stride, up_kernel in zip(
                in_channels, out_channels, skip_channels, kernel_size, strides, upsample_kernel_size
            ):
                params = {
                    "spatial_dims": self.spatial_dims,
                    "in_channels": in_c,
                    "out_channels": out_c,
                    "skip_channels": skip_c,
                    "kernel_size": kernel,
                    "stride": stride,
                    "norm_name": self.norm_name,
                    "act_name": self.act_name,
                    "dropout": self.dropout,
                    "upsample_kernel_size": up_kernel,
                    "trans_bias": trans_bias,
                    "my_att": self.my_att,
                    "skip_att": self.skip_att,
                }
                layer = conv_block(**params)
                layers.append(layer)
        else:
            for in_c, out_c, kernel, stride in zip(in_channels, out_channels, kernel_size, strides):
                params = {
                    "spatial_dims": self.spatial_dims,
                    "in_channels": in_c,
                    "out_channels": out_c,
                    "kernel_size": kernel,
                    "stride": stride,
                    "norm_name": self.norm_name,
                    "act_name": self.act_name,
                    "dropout": self.dropout,
                }
                layer = conv_block(**params)
                layers.append(layer)
        return nn.ModuleList(layers)
    def get_deep_supervision_heads(self):
        # One output projection per supervised decoder level (levels 1..N).
        return nn.ModuleList([self.get_output_block(i + 1) for i in range(self.deep_supr_num)])
    @staticmethod
    def initialize_weights(module):
        # Kaiming init matched to the leaky-relu slope used in the blocks;
        # batch norms get N(1, 0.02) weights and zero bias.
        if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose3d, nn.ConvTranspose2d)):
            nn.init.kaiming_normal_(module.weight, a=0.01, mode='fan_in', nonlinearity='leaky_relu')
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
        elif isinstance(module, (nn.BatchNorm3d, nn.BatchNorm2d)):
            nn.init.normal_(module.weight.data, 1.0, 0.02)
            module.bias.data.zero_()
        elif isinstance(module, (nn.Linear)):
            nn.init.normal_(module.weight.data, 1.0, 0.001)
            if module.bias is not None:
                module.bias.data.zero_()
import os.path as osp
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch_geometric.data import Data
from torch_geometric.datasets import Planetoid, Amazon, Coauthor, WikiCS
from torch_geometric.transforms import Compose, NormalizeFeatures, ToUndirected
from ogb.nodeproppred import PygNodePropPredDataset
def load_data(data_dir, dataset_name,
              transform=Compose([ToUndirected()]),
              mask_dir="./mask",
              load_mask=True,
              save_mask=True):
    """Load a PyG dataset and attach train/val/test masks to its data object.

    Args:
        data_dir: root directory for downloaded datasets.
        dataset_name: one of Cora/Citeseer/Pubmed, WikiCS, Computers/Photo,
            CS/Physics, ogbn-arxiv or ogbn-mag.
        transform: transform applied on every data access.
        mask_dir: directory holding cached split masks.
        load_mask: reuse a cached mask from ``mask_dir`` when one exists
            (ignored for datasets that ship their own splits).
        save_mask: cache a freshly created mask to ``mask_dir``.

    Returns:
        The dataset, with ``train_mask``/``val_mask``/``test_mask`` set.

    Raises:
        ValueError: if ``dataset_name`` is not supported.
    """
    # NOTE: the previous version unconditionally reset
    # ``load_mask = save_mask = True`` here, silently ignoring the caller's
    # arguments; the parameters are now honored (defaults unchanged).
    if dataset_name in ['Cora', 'Citeseer', 'Pubmed']:
        dataset = Planetoid(root=data_dir, name=dataset_name,
                            transform=transform, split="full")
        # Planetoid ships canonical splits; never cache or load masks.
        load_mask = save_mask = False
    elif dataset_name in ['WikiCS']:
        dataset = WikiCS(root=osp.join(data_dir, dataset_name),
                         transform=transform)
    elif dataset_name in ['Computers', 'Photo']:
        dataset = Amazon(root=data_dir, name=dataset_name, transform=transform)
    elif dataset_name in ['CS', 'Physics']:
        dataset = Coauthor(root=data_dir, name=dataset_name,
                           transform=transform)
    elif dataset_name in ['ogbn-arxiv']:
        dataset = PygNodePropPredDataset(root=data_dir, name=dataset_name,
                                         transform=transform)
        dataset.data.y = dataset.data.y.squeeze()
        # OGB provides official splits via get_idx_split().
        load_mask = save_mask = False
    elif dataset_name in ['ogbn-mag']:
        dataset = PygNodePropPredDataset(name=dataset_name, root=data_dir,
                                         transform=Compose([
                                             ToUndirected()
                                         ]))
        rel_data = dataset[0]
        # We are only interested in paper <-> paper relations.
        data = Data(
            x=rel_data.x_dict['paper'],
            edge_index=rel_data.edge_index_dict[('paper', 'cites', 'paper')],
            y=rel_data.y_dict['paper'])
        data = transform(data)
        dataset.data = data
        dataset.data.y = dataset.data.y.squeeze()
        load_mask = save_mask = False
    else:
        raise ValueError("Dataset {} not implemented.".format(dataset_name))
    mask_path = osp.join(mask_dir, "{}.pt".format(dataset_name))
    if osp.exists(mask_path) and load_mask:
        train_mask, val_mask, test_mask = load_preset_mask(mask_path)
    else:
        train_mask, val_mask, test_mask = create_mask(
            dataset=dataset,
            dataset_name=dataset_name,
            mask_path=mask_path if save_mask else None)
    dataset.data.train_mask = train_mask
    dataset.data.val_mask = val_mask
    dataset.data.test_mask = test_mask
    return dataset
def create_mask(dataset, dataset_name='WikiCS', data_seed=0, mask_path=None):
    r"""Create train/val/test boolean node masks for ``dataset``.

    Args:
        dataset: the loaded PyG/OGB dataset.
        dataset_name: dataset key; selects the splitting strategy.
        data_seed: random seed for the random 10/10/80 splits
            (Computers/Photo/CS/Physics only).
        mask_path: when given, the masks are cached there with ``torch.save``.

    Returns:
        Tuple ``(train_mask, val_mask, test_mask)`` of boolean tensors.

    Raises:
        ValueError: if ``dataset_name`` is not supported (previously this
        fell through with the mask variables unbound, raising NameError).
    """
    data = dataset[0]
    if dataset_name in ['Cora', 'Citeseer', 'Pubmed']:
        # Planetoid ships canonical splits; just forward them.
        train_mask, val_mask, test_mask = \
            data.train_mask, data.val_mask, data.test_mask
    elif dataset_name in ['WikiCS']:
        # WikiCS has 20 preset train/val splits but a single test split;
        # repeat the test mask so all three have a leading split dimension.
        train_mask = data.train_mask.t()
        val_mask = data.val_mask.t()
        test_mask = data.test_mask.repeat(20, 1)
    elif dataset_name in ['Computers', 'Photo', 'CS', 'Physics']:
        # Random 10% train / 10% val / 80% test split, seeded for repeatability.
        idx = np.arange(len(data.y))
        train_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        val_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        test_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        train_idx, test_idx = train_test_split(
            idx, test_size=0.8, random_state=data_seed)
        train_idx, val_idx = train_test_split(
            train_idx, test_size=0.5, random_state=data_seed)
        train_mask[train_idx] = True
        val_mask[val_idx] = True
        test_mask[test_idx] = True
    elif dataset_name in ['ogbn-arxiv']:
        # Official OGB split indices -> boolean masks.
        split_idx = dataset.get_idx_split()
        train_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        val_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        test_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        train_mask[split_idx['train']] = True
        val_mask[split_idx['valid']] = True
        test_mask[split_idx['test']] = True
    elif dataset_name in ['ogbn-mag']:
        # Heterogeneous OGB split: only the 'paper' node type is used.
        split_idx = dataset.get_idx_split()
        train_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        val_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        test_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        train_mask[split_idx['train']['paper']] = True
        val_mask[split_idx['valid']['paper']] = True
        test_mask[split_idx['test']['paper']] = True
    else:
        raise ValueError("Dataset {} not implemented.".format(dataset_name))
    # save preset mask
    if mask_path is not None:
        torch.save([train_mask, val_mask, test_mask], mask_path)
    return train_mask, val_mask, test_mask
def load_preset_mask(mask_path):
    """Load a previously cached [train, val, test] mask list from disk."""
    masks = torch.load(mask_path)
    return masks
import torch
import torch.nn.functional as F
def inv_dec_loss(h1, h2, lambd):
    """Invariance + decorrelation loss over two embedding views
    (Barlow-Twins style).

    Args:
        h1, h2: (N, D) embeddings of the two augmented views.
        lambd: weight of the decorrelation terms.

    Returns:
        Scalar tensor: ``-trace(C12) + lambd * (||I - C11||^2 + ||I - C22||^2)``
        where ``Cab = h_a^T h_b / N``.
    """
    N = h1.size(0)
    # Cross- and per-view auto-correlation matrices, normalized by batch size.
    c = torch.mm(h1.T, h2) / N
    c1 = torch.mm(h1.T, h1) / N
    c2 = torch.mm(h2.T, h2) / N
    # Invariance: pull the diagonal of the cross-correlation towards 1.
    loss_inv = -torch.diagonal(c).sum()
    # Decorrelation: push each view's auto-correlation towards the identity.
    # Build the identity directly on the embeddings' device to avoid an
    # unnecessary CPU allocation + transfer.
    iden = torch.eye(c.shape[0], device=h1.device)
    loss_dec1 = (iden - c1).pow(2).sum()
    loss_dec2 = (iden - c2).pow(2).sum()
    return loss_inv + lambd * (loss_dec1 + loss_dec2)
import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch_sparse import SparseTensor
def set_random_seeds(random_seed=0):
    """Seed every RNG the project touches (torch CPU + CUDA, numpy, random)
    and switch cuDNN into deterministic, non-benchmarking mode so runs are
    reproducible."""
    for seeder in (torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all,
                   np.random.seed,
                   random.seed):
        seeder(random_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def edgeidx2sparse(edge_index, num_nodes):
    """Convert a (2, E) ``edge_index`` tensor into a square
    ``SparseTensor`` adjacency of size (num_nodes, num_nodes), kept on the
    same device as ``edge_index``."""
    shape = (num_nodes, num_nodes)
    adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=shape)
    return adj.to(edge_index.device)
import torch
import torch.nn as nn
from torch_geometric.nn import BatchNorm, GCNConv, LayerNorm, SAGEConv, Sequential
def get_activation(name='ReLU'):
    """Return a fresh activation module selected by name.

    Args:
        name: activation identifier; one of ``"ReLU"`` or ``"PReLU"``.

    Returns:
        A newly constructed ``nn.ReLU`` or ``nn.PReLU`` instance.

    Raises:
        NotImplementedError: for any unsupported name.
    """
    factories = {'ReLU': nn.ReLU, 'PReLU': nn.PReLU}
    try:
        return factories[name]()
    except KeyError:
        # Fixed typo in the original message ("Acitivation").
        raise NotImplementedError(
            "Activation {} not implemented!".format(name)) from None
class ConvLayer(nn.Module):
    """One graph-convolution block: GCN conv -> optional BatchNorm -> activation.

    NOTE: ``get_layer`` only accepts the name ``"GCN"``, so the constructor
    default ``layer_name="gcn"`` would raise unless callers pass ``"GCN"``
    explicitly (they do, via ``args.layer_name``).
    """

    def __init__(self, in_dim, out_dim, layer_name="gcn", act_name="ReLU", batchnorm=True) -> None:
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.layer = self.get_layer(layer_name)
        self.batchnorm = nn.BatchNorm1d(out_dim) if batchnorm else None
        self.act = get_activation(act_name)

    def get_layer(self, name="GCN"):
        """Instantiate the backbone convolution by name (GCN only)."""
        if name != "GCN":
            raise NotImplementedError("Layer {} not implemented!".format(name))
        return GCNConv(in_channels=self.in_dim, out_channels=self.out_dim)

    def reset_parameters(self):
        self.layer.reset_parameters()
        if self.batchnorm is not None:
            self.batchnorm.reset_parameters()

    def forward(self, x, egde_index):
        out = self.layer(x, egde_index)
        if self.batchnorm is not None:
            out = self.batchnorm(out)
        return self.act(out)
class GCN(nn.Module):
    """Stacked GCN encoder whose blocks are trained greedily.

    ``forward`` detaches the input of every layer, so gradients never flow
    across block boundaries -- each block learns only from its own loss
    (the block-wise training scheme of this project).
    """
    def __init__(self, in_dim, hid_dims, args):
        super().__init__()
        # Layer widths: input dim followed by one entry per hidden block.
        dims = [in_dim] + hid_dims
        assert len(dims) >= 2
        self.layers = nn.ModuleList()
        for in_dim, out_dim in zip(dims[:-1], dims[1:]):
            self.layers.append(ConvLayer(in_dim, out_dim, args.layer_name, args.act_name, args.batchnorm))
    def forward(self, x, edge_index):
        # Returns the output of every block, one tensor per layer.
        outputs = []
        for layer in self.layers:
            # detach(): stop gradient flow at the block boundary.
            x = layer(x.detach(), edge_index)
            outputs.append(x)
        return outputs
    @torch.no_grad()
    def embeds(self, x, edge_index):
        # Final-layer embedding without gradient tracking.
        for layer in self.layers:
            x = layer(x.detach(), edge_index)
        return x
    def reset_parameters(self):
        for layer in self.layers:
            layer.reset_parameters()
    @torch.no_grad()
    def get_embeding(self, data):
        # Inference-time embedding from a PyG ``Data`` object (no per-layer
        # detach here, but @torch.no_grad makes that irrelevant).
        self.eval()
        x, edge_index = data.x, data.edge_index
        for layer in self.layers:
            x = layer(x, edge_index)
        return x
import functools
import logging
import os
import sys
import torch
from typing import Optional
from termcolor import colored
__all__ = ["setup_logger", "get_logger"]
# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def setup_logger(
    output: Optional[str] = None, distributed_rank: int = 0, *, mode: str = 'w',
    color: bool = True, name: str = "exp", abbrev_name: Optional[str] = None
):
    """Initialize the graphwar logger and set its verbosity level to "DEBUG".
    Parameters
    ----------
    output : Optional[str], optional
        a file name or a directory to save log. If None, will not save log file.
        If ends with ".txt" or ".log", assumed to be a file name.
        Otherwise, logs will be saved to `output/log.txt`.
    distributed_rank : int, optional
        used for distributed training, by default 0
    mode : str, optional
        mode for the output file (if output is given), by default 'w'.
    color : bool, optional
        whether to use color when printing, by default True
    name : str, optional
        the root module name of this logger, by default "graphwar"
    abbrev_name : Optional[str], optional
        an abbreviation of the module, to avoid long names in logs.
        Set to "" to not log the root module in logs.
        By default, None.
    Returns
    -------
    logging.Logger
        a logger
    Example
    -------
    >>> logger = setup_logger(name='my exp')
    >>> logger.info('message')
    [12/19 17:01:43 my exp]: message
    >>> logger.error('message')
    ERROR [12/19 17:02:22 my exp]: message
    >>> logger.warning('message')
    WARNING [12/19 17:02:32 my exp]: message
    >>> # specify output files
    >>> logger = setup_logger(output='log.txt', name='my exp')
    # additive, by default mode='w'
    >>> logger = setup_logger(output='log.txt', name='my exp', mode='a')
    # once you logger is set, you can call it by
    >>> logger = get_logger(name='my exp')
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Don't bubble records up to the root logger (avoids double printing).
    logger.propagate = False
    if abbrev_name is None:
        abbrev_name = name
    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    # stdout logging: master only
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(
                colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
                datefmt="%m/%d %H:%M:%S",
                root_name=name,
                abbrev_name=str(abbrev_name),
            )
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        # Non-master ranks log to a per-rank suffixed file.
        if distributed_rank > 0:
            filename = filename + ".rank{}".format(distributed_rank)
        dirs = os.path.dirname(filename)
        if dirs:
            if not os.path.isdir(dirs):
                os.makedirs(dirs)
        file_handle = logging.FileHandler(filename=filename, mode=mode)
        file_handle.setLevel(logging.DEBUG)
        file_handle.setFormatter(plain_formatter)
        logger.addHandler(file_handle)
    return logger
def get_logger(name: str = "GraphWar"):
    """Return the logger registered under ``name`` (creating it if absent).

    Thin wrapper around :func:`logging.getLogger`, so callers can retrieve
    the logger previously configured by :func:`setup_logger` by its name.

    Parameters
    ----------
    name : str, optional
        name of the logger, by default "GraphWar"

    Returns
    -------
    a logger for the given name
    """
    logger = logging.getLogger(name)
    return logger
class _ColorfulFormatter(logging.Formatter):
    """Formatter that abbreviates the logger name and (via termcolor's
    ``colored``) prepends a colored severity prefix for WARNING and
    ERROR/CRITICAL records; other levels use the plain format."""
    def __init__(self, *args, **kwargs):
        # Occurrences of root_name in a record's logger name are replaced
        # by abbrev_name (both get a trailing dot so only prefixes match).
        self._root_name = kwargs.pop("root_name") + "."
        self._abbrev_name = kwargs.pop("abbrev_name", "")
        if len(self._abbrev_name):
            self._abbrev_name = self._abbrev_name + "."
        super(_ColorfulFormatter, self).__init__(*args, **kwargs)
    def formatMessage(self, record):
        # NOTE: mutates record.name in place before delegating to the base
        # formatter.
        record.name = record.name.replace(self._root_name, self._abbrev_name)
        log = super(_ColorfulFormatter, self).formatMessage(record)
        if record.levelno == logging.WARNING:
            prefix = colored("WARNING", "red", attrs=["blink"])
        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
            prefix = colored("ERROR", "red", attrs=["blink", "underline"])
        else:
            # DEBUG/INFO: no colored prefix.
            return log
        return prefix + " " + log
class Statistics(object):
    """Accumulates (valid_acc, test_acc) pairs per run and reports the best
    epoch per run plus mean/std summaries across runs (values printed as
    percentages)."""

    def __init__(self, runs, info=None):
        self.info = info
        self.results = [[] for _ in range(runs)]

    @staticmethod
    def _best_index(valid_col, last_best):
        # Index of the maximum validation score; with last_best, ties are
        # resolved in favor of the latest epoch instead of the earliest.
        if last_best:
            return valid_col.size(0) - valid_col.flip(dims=[0]).argmax().item() - 1
        return valid_col.argmax().item()

    def add_result(self, run, result):
        assert len(result) == 2
        assert 0 <= run < len(self.results)
        self.results[run].append(result)

    def print_statistics(self, run=None, f=sys.stdout, last_best=False):
        if run is not None:
            # Single-run report: best epoch chosen by validation accuracy.
            scores = 100 * torch.tensor(self.results[run])
            best = self._best_index(scores[:, 0], last_best)
            print(f'Run {run + 1:02d}:', file=f)
            print(f'Highest Valid: {scores[:, 0].max():.2f}', file=f)
            print(f'Highest Eval Point: {best + 1}', file=f)
            print(f'   Final Test: {scores[best, 1]:.2f}', file=f)
        else:
            # Cross-run summary (requires every run to have the same number
            # of recorded epochs); returns (test_mean, test_std).
            scores = 100 * torch.tensor(self.results)
            pairs = []
            for per_run in scores:
                best = self._best_index(per_run[:, 0], last_best)
                pairs.append((per_run[:, 0].max().item(), per_run[best, 1].item()))
            summary = torch.tensor(pairs)
            print(f'All runs:', file=f)
            r = summary[:, 0]
            print(f'Highest Valid: {r.mean():.2f} ± {r.std():.2f}', file=f)
            r = summary[:, 1]
            print(f'   Final Test: {r.mean():.2f} ± {r.std():.2f}', file=f)
            return r.mean().cpu().item(), r.std().cpu().item()
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
def test(embeds, data, num_classes, FLAGS, device="cpu"):
    """Full-batch linear-probe evaluation of node embeddings, averaged over
    5 classifier runs; hyperparameters come from ``FLAGS``."""
    eval_kwargs = dict(
        input_emb=embeds,
        data=data,
        num_classes=num_classes,
        lr=FLAGS.lr_cls,
        wd=FLAGS.wd_cls,
        cls_epochs=FLAGS.epochs_cls,
        cls_runs=5,
        device=device,
    )
    return node_cls_downstream_task_eval(**eval_kwargs)
def batch_test(embeds, data, num_classes, FLAGS, device="cpu"):
    """Mini-batch variant of :func:`test`: linear-probe evaluation of node
    embeddings over 5 classifier runs, hyperparameters from ``FLAGS``."""
    eval_kwargs = dict(
        input_emb=embeds,
        data=data,
        num_classes=num_classes,
        lr=FLAGS.lr_cls,
        wd=FLAGS.wd_cls,
        cls_epochs=FLAGS.epochs_cls,
        cls_runs=5,
        device=device,
    )
    return batch_node_cls_downstream_task_eval(**eval_kwargs)
def eval_acc(model, x, y):
    """Fraction of samples whose predicted class (row-wise argmax of
    ``model(x)``) matches the label tensor ``y``; the model is switched to
    eval mode and the forward pass runs without gradients."""
    model.eval()
    with torch.no_grad():
        logits = model(x)
    preds = logits.argmax(dim=1).squeeze(-1)
    return (preds == y).float().mean().item()
class Classifier(nn.Module):
    """Single linear layer emitting log-probabilities over ``out_dim`` classes
    (suitable for ``F.nll_loss``)."""

    def __init__(self, in_dim, out_dim):
        super(Classifier, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        logits = self.linear(x)
        return logits.log_softmax(dim=-1)

    def reset_parameters(self):
        """Re-initialize the linear layer's weights and bias."""
        self.linear.reset_parameters()
def batch_train_cls(cls, x, y, train_mask, val_mask, test_mask,
lr=1e-2, weight_decay=1e-5, epochs=100):
cls.reset_parameters()
optimizer = torch.optim.AdamW(
cls.parameters(), lr=lr, weight_decay=weight_decay)
train_x, train_y = x[train_mask], y[train_mask]
val_x, val_y = x[val_mask], y[val_mask]
test_x, test_y = x[test_mask], y[test_mask]
train_loader = DataLoader(torch.arange(train_x.size(0)), pin_memory=False, batch_size=8192, shuffle=True)
best_val_acc, best_test_acc = 0.0, 0.0
for _ in range(epochs):
for train_idx in train_loader:
cls.train()
optimizer.zero_grad()
output = cls(train_x[train_idx])
loss = F.nll_loss(output, train_y[train_idx])
loss.backward()
optimizer.step()
val_acc, test_acc = eval_acc(cls, val_x, val_y), eval_acc(cls, test_x, test_y)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_test_acc = test_acc
return best_val_acc, best_test_acc
def train_cls(cls, x, y, train_mask, val_mask, test_mask,
lr=1e-2, weight_decay=1e-5, epochs=100):
cls.reset_parameters()
optimizer = torch.optim.AdamW(
cls.parameters(), lr=lr, weight_decay=weight_decay)
train_x, train_y = x[train_mask], y[train_mask]
val_x, val_y = x[val_mask], y[val_mask]
test_x, test_y = x[test_mask], y[test_mask]
best_val_acc, best_test_acc = 0.0, 0.0
for _ in range(epochs):
cls.train()
optimizer.zero_grad()
output = cls(train_x)
loss = F.nll_loss(output, train_y)
loss.backward()
optimizer.step()
val_acc, test_acc = eval_acc(cls, val_x, val_y), eval_acc(cls, test_x, test_y)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_test_acc = test_acc
return best_val_acc, best_test_acc
def batch_node_cls_downstream_task_eval(input_emb, data, num_classes,
lr, wd, cls_epochs=100,
cls_runs=10, device="cpu"):
all_val_acc, all_test_acc = [], []
# input_emb = F.normalize(input_emb, dim=1) # l2 normalize
gnn_emb_dim = input_emb.size(1)
classifier = Classifier(gnn_emb_dim, num_classes).to(device)
for _ in range(cls_runs):
best_val_acc, best_test_acc = batch_train_cls(
classifier, input_emb, data.y,
data.train_mask, data.val_mask, data.test_mask,
lr=lr, weight_decay=wd, epochs=cls_epochs)
all_val_acc.append(best_val_acc)
all_test_acc.append(best_test_acc)
return all_val_acc, all_test_acc
def node_cls_downstream_task_eval(input_emb, data, num_classes,
lr, wd, cls_epochs=100,
cls_runs=10, device="cpu"):
all_val_acc, all_test_acc = [], []
# input_emb = F.normalize(input_emb, dim=1) # l2 normalize
gnn_emb_dim = input_emb.size(1)
classifier = Classifier(gnn_emb_dim, num_classes).to(device)
for _ in range(cls_runs):
best_val_acc, best_test_acc = train_cls(
classifier, input_emb, data.y,
data.train_mask, data.val_mask, data.test_mask,
lr=lr, weight_decay=wd, epochs=cls_epochs)
all_val_acc.append(best_val_acc)
all_test_acc.append(best_test_acc)
return all_val_acc, all_test_acc
| 4,642 | 31.468531 | 109 | py |
BlockGCL | BlockGCL-master/train.py | import copy
import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from absl import app, flags
from torch.optim import AdamW
# custom modules
from logger import setup_logger
from utils import set_random_seeds, edgeidx2sparse
from transforms import get_graph_drop_transform
from model import GCN
from loss import inv_dec_loss
from eval import test, batch_test
from dataloader import load_data
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'model_seed', 123, 'Random seed used for model initialization and training.')
flags.DEFINE_integer(
'data_seed', 0, 'Random seed used to generate train/val/test split.')
flags.DEFINE_integer('gpu_id', 0, 'The id of GPU to use. -1 indicates CPU.')
# Dataset.
flags.DEFINE_enum('dataset', 'ogbn-mag',
['Cora', 'Citeseer', 'Pubmed', 'Computers', 'Photo',
'CS', 'Physics', 'WikiCS', 'ogbn-arxiv', 'ogbn-mag'],
'Which graph dataset to use.')
flags.DEFINE_string('data_dir', '~/public_data/pyg_data/',
'Where the dataset resides.')
# Architecture.
flags.DEFINE_multi_integer('graph_encoder_layer', [
256, 256], 'Conv layer sizes.')
flags.DEFINE_bool('batchnorm', True, 'Batchnorm or not.')
flags.DEFINE_string('layer_name', "GCN", 'Con. layer.')
flags.DEFINE_string('act_name', "ReLU", 'Activation funciton.')
# Training hyperparameters.
flags.DEFINE_float('lambd', 1e-3, 'The ratio for decorrelation loss.')
flags.DEFINE_integer('epochs', 500, 'The number of training epochs.')
flags.DEFINE_float('lr', 1e-3, 'The learning rate for model training.')
flags.DEFINE_float('weight_decay', 1e-5,
'The value of the weight decay for training.')
flags.DEFINE_float(
'lr_cls', 1e-2,
'The learning rate for model training for downstream classifier.')
flags.DEFINE_float(
'wd_cls', 1e-5,
'The value of the weight decay for training for downstream classifier..')
flags.DEFINE_integer(
'epochs_cls', 100,
'The number of training epochs for node downstream classifier.')
# Augmentations.
flags.DEFINE_float('drop_edge_p', 0.4, 'Probability of edge dropout 1.')
flags.DEFINE_float('drop_feat_p', 0.2,
'Probability of node feature dropout 1.')
# Logging and checkpoint.
flags.DEFINE_string(
'logdir', None, 'Where the checkpoint and logs are stored.')
flags.DEFINE_string('mask_dir', './mask',
'Where the checkpoint and logs are stored.')
# Evaluation
flags.DEFINE_integer('eval_period', 5, 'Evaluate every eval_epochs.')
def run(dataset, logger):
gpu_available = torch.cuda.is_available() and FLAGS.gpu_id >= 0
device = torch.device("cuda:{}".format(FLAGS.gpu_id)) if gpu_available \
else torch.device("cpu")
logger.info("Using {} for training.".format(device))
data = dataset[0].to(device)
num_classes = dataset.num_classes
# set random seed
if FLAGS.model_seed is not None:
set_random_seeds(FLAGS.model_seed)
logger.info("Random seed set to {}.".format(FLAGS.model_seed))
transform = get_graph_drop_transform(drop_edge_p=FLAGS.drop_edge_p,
drop_feat_p=FLAGS.drop_feat_p)
encoder = GCN(data.x.size(1), FLAGS.graph_encoder_layer, FLAGS).to(device)
optimizer = AdamW(params=[{"params" :encoder.parameters()}],
lr=FLAGS.lr,
weight_decay=FLAGS.weight_decay)
# number of parameters
total_params = sum([param.nelement() for param in encoder.parameters()])
logger.info(encoder)
logger.info("Number of parameter: %.2fM" % (total_params/1e6))
# start training
logger.info("Satrt training")
best_test_acc_mean, best_test_acc_std, \
best_test_acc_epoch, best_test_acc_list = 0, 0, 0, []
for epoch in range(1, 1 + FLAGS.epochs):
# torch.cuda.empty_cache()
encoder.train()
optimizer.zero_grad()
data1 = transform(data)
data2 = transform(data)
data1.edge_index = edgeidx2sparse(data1.edge_index, data1.x.size(0))
data2.edge_index = edgeidx2sparse(data2.edge_index, data2.x.size(0))
outputs1, outputs2 = encoder(data1.x, data1.edge_index), encoder(data2.x, data2.edge_index)
total_loss = 0.
for o1, o2 in list(zip(outputs1, outputs2)):
loss = inv_dec_loss(o1, o2, FLAGS.lambd)
# total_loss += loss
total_loss += loss.item()
loss.backward()
# total_loss.backward()
optimizer.step()
# eval
if epoch == 1 or epoch % FLAGS.eval_period == 0:
encoder.eval()
with torch.no_grad():
# embeds = torch.cat(encoder(data), dim=1)
embeds = encoder(data.x, data.edge_index)[-1]
# embeds = encoder.embeds(data.x, data.edge_index)
# embeds = torch.cat(embeds, dim=1)
if FLAGS.dataset in ['ogbn-arxiv', 'ogbn-mag']:
_, test_acc_list = batch_test(embeds=embeds,
data=data,
num_classes=num_classes,
FLAGS=FLAGS,
device=device)
else:
_, test_acc_list = test(embeds=embeds,
data=data,
num_classes=num_classes,
FLAGS=FLAGS,
device=device)
test_acc_mean, test_acc_std = \
np.mean(test_acc_list), np.std(test_acc_list)
if test_acc_mean > best_test_acc_mean:
best_test_acc_mean = test_acc_mean
best_test_acc_std = test_acc_std
best_test_acc_epoch = epoch
best_test_acc_list = copy.deepcopy(test_acc_list)
# save encoder weights
# torch.save(model.online_encoder.state_dict(), os.path.join(FLAGS.logdir, '{}.pt'.format(FLAGS.dataset)))
logger.info("[Epoch {:4d}/{:4d}] loss={:.4f}, "
"test_acc={:.2f}±{:.2f} "
"[best_test_acc: {:.2f}±{:.2f} at epoch {}]".format(
epoch, FLAGS.epochs, total_loss,
test_acc_mean * 100, test_acc_std * 100,
best_test_acc_mean * 100, best_test_acc_std * 100,
best_test_acc_epoch
))
logger.info("Best test acc: {:.2f}±{:.2f} at epoch {}: {}".format(
best_test_acc_mean * 100, best_test_acc_std * 100,
best_test_acc_epoch, best_test_acc_list
))
def get_dataset():
dataset = load_data(data_dir=osp.expanduser(FLAGS.data_dir),
dataset_name=FLAGS.dataset,
mask_dir=FLAGS.mask_dir,
load_mask=False,
save_mask=False)
return dataset
def main(argv):
logger = setup_logger(output="./logs/exp.log".format(FLAGS.dataset))
dataset = get_dataset()
run(dataset=dataset, logger=logger)
if __name__ == "__main__":
app.run(main)
| 7,308 | 37.267016 | 122 | py |
BlockGCL | BlockGCL-master/transforms.py | import copy
import torch
from torch_geometric.utils.dropout import dropout_adj
from torch_geometric.transforms import Compose
class DropFeatures:
r"""Drops node features with probability p."""
def __init__(self, p=None):
assert 0. < p < 1., \
'Dropout probability has to be between 0 and 1, but got %.2f' % p
self.p = p
def __call__(self, data):
drop_mask = torch.empty(size=(data.x.size(1),),
dtype=torch.float32,
device=data.x.device).uniform_(0, 1) < self.p
data.x[:, drop_mask] = 0
return data
def __repr__(self):
return '{}(p={})'.format(self.__class__.__name__, self.p)
class DropEdges:
r"""Drops edges with probability p."""
def __init__(self, p, force_undirected=False):
assert 0. < p < 1., \
'Dropout probability has to be between 0 and 1, but got %.2f' % p
self.p = p
self.force_undirected = force_undirected
def __call__(self, data):
edge_index = data.edge_index
edge_attr = data.edge_attr if 'edge_attr' in data else None
edge_index, edge_attr = dropout_adj(edge_index, edge_attr,
p=self.p,
force_undirected=self.force_undirected)
data.edge_index = edge_index
if edge_attr is not None:
data.edge_attr = edge_attr
return data
def __repr__(self):
return '{}(p={}, force_undirected={})'.format(
self.__class__.__name__,
self.p,
self.force_undirected)
def get_graph_drop_transform(drop_edge_p, drop_feat_p):
transforms = list()
# make copy of graph
transforms.append(copy.deepcopy)
# drop edges
if drop_edge_p > 0.:
transforms.append(DropEdges(drop_edge_p))
# drop features
if drop_feat_p > 0.:
transforms.append(DropFeatures(drop_feat_p))
return Compose(transforms)
| 2,025 | 27.942857 | 83 | py |
RecSys_PyTorch | RecSys_PyTorch-master/main.py | # Import packages
import os
import torch
import models
from data.dataset import UIRTDataset
from evaluation.evaluator import Evaluator
from experiment.early_stop import EarlyStop
from loggers import FileLogger, CSVLogger
from utils.general import make_log_dir, set_random_seed
from config import load_config
"""
Configurations
"""
config = load_config()
exp_config = config.experiment
gpu_id = exp_config.gpu
seed = exp_config.seed
dataset_config = config.dataset
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
set_random_seed(seed)
"""
Dataset
"""
dataset = UIRTDataset(**dataset_config)
# """
# Early stop
# """
# early_stop = EarlyStop(**config['EarlyStop'])
"""
Model base class
"""
model_name = config.experiment.model_name
model_base = getattr(models, model_name)
hparams = config.hparams
"""
Logger
"""
log_dir = make_log_dir(os.path.join(exp_config.save_dir, model_name))
logger = FileLogger(log_dir)
csv_logger = CSVLogger(log_dir)
# Save log & dataset config.
logger.info(config)
logger.info(dataset)
valid_input, valid_target = dataset.valid_input, dataset.valid_target
evaluator = Evaluator(valid_input, valid_target, protocol=dataset.protocol, ks=config.evaluator.ks)
model = model_base(dataset, hparams, device)
ret = model.fit(dataset, exp_config, evaluator=evaluator, loggers=[logger, csv_logger])
print(ret['scores'])
csv_logger.save() | 1,665 | 22.8 | 103 | py |
RecSys_PyTorch | RecSys_PyTorch-master/setup.py | from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy as np
import os
pyx_directories = ["evaluation/backend/cython"]
cpp_dirs = ["evaluation/backend/cython/include"]
pwd = os.getcwd()
additional_dirs = [os.path.join(pwd, d) for d in cpp_dirs]
for t_dir in pyx_directories:
target_dir = os.path.join(pwd, t_dir)
# os.chdir(target_dir)
ori_files = set(os.listdir(t_dir))
extensions = [
Extension(
'*',
[os.path.join(t_dir, "*.pyx")],
extra_compile_args=["-std=c++11"])
]
setup(
ext_modules=cythonize(extensions, language="c++"),
include_dirs=[np.get_include()]+additional_dirs
)
new_files = set(os.listdir(t_dir))
for n_file in new_files:
if n_file not in ori_files and n_file.split(".")[-1] in ("c", "cpp"):
os.remove(os.path.join(t_dir, n_file))
# os.chdir(pwd) | 911 | 25.823529 | 77 | py |
RecSys_PyTorch | RecSys_PyTorch-master/config.py | from typing import List, Tuple
from dataclasses import dataclass, field
from omegaconf import OmegaConf
@dataclass
class DatasetConfig:
data_path:str='datasets/ml-100k/u.data'
dataname:str='ml-1m'
separator:str='\t'
binarize_threshold:float=0.0
implicit:bool=True
min_item_per_user:int=10
min_user_per_item:int=1
protocol:str='holdout' # holdout, leave_one_out
generalization:str='weak' # weak/strong
holdout_users:int=600
valid_ratio:float=0.1
test_ratio:float=0.2
leave_k:int=1
split_random:bool=True
@dataclass
class EvaluatorConfig:
ks:List[int] = field(default_factory=lambda: [5])
@dataclass
class EarlyStopConfig:
early_stop:int=25
early_stop_measure:str='NDCG@10'
@dataclass
class ExperimentConfig:
debug:bool=False
save_dir:str='saves'
num_epochs:int=10
batch_size:int=256
verbose:int=0
print_step:int=1
test_step:int=1
test_from:int=1
model_name:str='EASE'
num_exp:int=5
seed:int=2020
gpu:int=0
def load_config():
dataset_config = OmegaConf.structured({'dataset' :DatasetConfig})
evaluator_config = OmegaConf.structured({'evaluator': EvaluatorConfig})
early_stop_config = OmegaConf.structured({'early_stop': EarlyStopConfig})
experiment_config = OmegaConf.structured({'experiment': ExperimentConfig})
model_name = experiment_config.experiment.model_name
# model_config = OmegaConf.structured({'hparams': OmegaConf.load(f"conf/{model_name}.yaml")})
model_config = OmegaConf.structured(OmegaConf.load(f"conf/{model_name}.yaml"))
config = OmegaConf.merge(dataset_config, evaluator_config, early_stop_config, experiment_config, model_config)
return config
if __name__ == '__main__':
config = load_config()
print(config) | 1,804 | 27.203125 | 114 | py |
RecSys_PyTorch | RecSys_PyTorch-master/trainer/helper_func.py | import copy
from time import time
def fit_model(model, dataset, exp_config, evaluator, early_stop, loggers, run_n=-1):
# initialize experiment
early_stop.initialize()
# train model
fit_start = time()
best_valid_score = model.fit(dataset, exp_config, evaluator, early_stop, loggers)
train_time = time() - fit_start
return best_valid_score, train_time | 379 | 30.666667 | 85 | py |
RecSys_PyTorch | RecSys_PyTorch-master/trainer/__init__.py | from .SingleParamRepeat import SingleParamRepeat | 48 | 48 | 48 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/evaluator.py | import time
import numpy as np
from typing import Iterable
from collections import OrderedDict
from .backend import eval_func_router, predict_topk_func
from data.data_batcher import DataBatcher
from utils.types import sparse_to_dict
class Evaluator:
def __init__(self, eval_input, eval_target, protocol, ks, eval_batch_size=1024):
"""
"""
self.top_k = sorted(list(ks)) if isinstance(ks, Iterable) else [ks]
self.max_k = max(self.top_k)
self.batch_size = eval_batch_size
self.eval_input = eval_input
self.eval_target = sparse_to_dict(eval_target)
self.protocol = protocol
self._register_eval_func()
def evaluate(self, model, mean=True):
# Switch to eval mode
model.eval()
# eval users
eval_users = np.array(list(self.eval_target.keys()))
num_users = len(eval_users)
num_items = self.eval_input.shape
output = model.predict(eval_users, self.eval_input, self.batch_size)
pred = self.predict_topk(output.astype(np.float32), self.max_k)
score_cumulator = self.eval_func(pred, self.eval_target, self.top_k)
scores = {}
for metric in score_cumulator:
score_by_ks = score_cumulator[metric]
for k in score_by_ks:
if mean:
scores['%s@%d' % (metric, k)] = score_by_ks[k].mean
else:
scores['%s@%d' % (metric, k)] = score_by_ks[k].history
# return
return scores
def _register_eval_func(self):
self.eval_func = eval_func_router[self.protocol]
self.predict_topk = predict_topk_func | 1,696 | 29.854545 | 84 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/__init__.py | from .evaluator import Evaluator | 32 | 32 | 32 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/__init__.py | HOLDOUT_METRICS = ['Prec', 'Recall', 'NDCG']
LOO_METRICS = ['HR', 'NDCG']
try:
from .cython.loo import compute_loo_metrics_cy
from .cython.holdout import compute_holdout_metrics_cy
from .cython.func import predict_topk_cy
CYTHON_OK = True
except:
print('evaluation with python backend...')
from .python.loo import compute_loo_metrics_py
from .python.holdout import compute_holdout_metrics_py
from .python.func import predict_topk_py
CYTHON_OK = False
if CYTHON_OK:
eval_func_router = {
'leave_one_out': compute_loo_metrics_cy,
'holdout': compute_holdout_metrics_cy
}
predict_topk_func = predict_topk_cy
else:
eval_func_router = {
'leave_one_out': compute_loo_metrics_py,
'holdout': compute_holdout_metrics_py
}
predict_topk_func = predict_topk_py | 846 | 28.206897 | 58 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/python/holdout.py | import math
from collections import OrderedDict
import numpy as np
from utils.stats import Statistics
from .. import HOLDOUT_METRICS
# from evaluation.backend import HOLDOUT_METRICS
# HOLDOUT_METRICS = ['Prec', 'Recall', 'NDCG']
def compute_holdout_metrics_py(pred, target, ks):
score_cumulator = OrderedDict()
for metric in HOLDOUT_METRICS:
score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in ks}
hits = []
for idx, u in enumerate(target):
pred_u = pred[idx]
target_u = target[u]
num_target_items = len(target_u)
for k in ks:
pred_k = pred_u[:k]
hits_k = [(i + 1, item) for i, item in enumerate(pred_k) if item in target_u]
num_hits = len(hits_k)
idcg_k = 0.0
for i in range(1, min(num_target_items, k) + 1):
idcg_k += 1 / math.log(i + 1, 2)
dcg_k = 0.0
for idx, item in hits_k:
dcg_k += 1 / math.log(idx + 1, 2)
prec_k = num_hits / k
recall_k = num_hits / num_target_items
ndcg_k = dcg_k / idcg_k
score_cumulator['Prec'][k].update(prec_k)
score_cumulator['Recall'][k].update(recall_k)
score_cumulator['NDCG'][k].update(ndcg_k)
hits.append(len(hits_k))
return score_cumulator
# class HoldoutEvaluator:
# def __init__(self, top_k, eval_pos, eval_target, eval_neg_candidates=None):
# self.top_k = top_k
# self.max_k = max(top_k)
# self.eval_pos = eval_pos
# self.eval_target = eval_target
# self.eval_neg_candidates = eval_neg_candidates
# def init_score_cumulator(self):
# score_cumulator = OrderedDict()
# for metric in ['Prec', 'Recall', 'NDCG']:
# score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in self.top_k}
# return score_cumulator
# def compute_metrics(self, topk, target, score_cumulator=None):
# if score_cumulator is None:
# score_cumulator = self.init_score_cumulator()
# hits = []
# for idx, u in enumerate(target):
# pred_u = topk[idx]
# target_u = target[u]
# num_target_items = len(target_u)
# for k in self.top_k:
# pred_k = pred_u[:k]
# hits_k = [(i + 1, item) for i, item in enumerate(pred_k) if item in target_u]
# num_hits = len(hits_k)
# idcg_k = 0.0
# for i in range(1, min(num_target_items, k) + 1):
# idcg_k += 1 / math.log(i + 1, 2)
# dcg_k = 0.0
# for idx, item in hits_k:
# dcg_k += 1 / math.log(idx + 1, 2)
# if num_hits:
# pass
# prec_k = num_hits / k
# recall_k = num_hits / min(num_target_items, k)
# ndcg_k = dcg_k / idcg_k
# score_cumulator['Prec'][k].update(prec_k)
# score_cumulator['Recall'][k].update(recall_k)
# score_cumulator['NDCG'][k].update(ndcg_k)
# hits.append(len(hits_k))
# return score_cumulator | 3,298 | 34.473118 | 98 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/python/loo.py | import math
from collections import OrderedDict
import numpy as np
from utils.stats import Statistics
from .. import LOO_METRICS
# from evaluation.backend import LOO_METRICS
# LOO_METRICS = ['HR', 'NDCG']
def compute_loo_metrics_py(pred, target, ks):
score_cumulator = OrderedDict()
for metric in LOO_METRICS:
score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in ks}
max_k = max(ks)
for idx, u in enumerate(target):
pred_u = pred[idx]
target_u = target[u][0]
hit_at_k = np.where(pred_u == target_u)[0][0] + 1 if target_u in pred_u else max_k + 1
for k in ks:
hr_k = 1 if hit_at_k <= k else 0
ndcg_k = 1 / math.log(hit_at_k + 1, 2) if hit_at_k <= k else 0
score_cumulator['HR'][k].update(hr_k)
score_cumulator['NDCG'][k].update(ndcg_k)
return score_cumulator | 905 | 29.2 | 94 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/python/__init__.py | 0 | 0 | 0 | py | |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/python/func.py | from time import time
import numpy as np
def predict_topk_py(scores, max_k):
# top_k item index (not sorted)
s = time()
relevant_items_partition = (-scores).argpartition(max_k, 1)[:, 0:max_k]
# top_k item score (not sorted)
relevant_items_partition_original_value = np.take_along_axis(scores, relevant_items_partition, 1)
# top_k item sorted index for partition
relevant_items_partition_sorting = np.argsort(-relevant_items_partition_original_value, 1)
# sort top_k index
topk = np.take_along_axis(relevant_items_partition, relevant_items_partition_sorting, 1)
return topk | 629 | 34 | 101 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/cython/holdout.py | import math
from collections import OrderedDict
import numpy as np
from utils.stats import Statistics
try:
from .holdout_func import compute_holdout
except:
raise ImportError('Holdout pyx import error')
from .. import HOLDOUT_METRICS
# HOLDOUT_METRICS = ['Prec', 'Recall', 'NDCG']
def compute_holdout_metrics_cy(pred, target, ks):
score_cumulator = OrderedDict()
for metric in HOLDOUT_METRICS:
score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in ks}
# Cython compute
# | M1@10 | M1@100 | M2@10 | M2@100| ...
# (top_k, ground_truth, num_eval_users, metrics_num, Ks):
results = compute_holdout(pred.astype(np.int32), target, len(HOLDOUT_METRICS), np.array(ks, dtype=np.int32))
for idx, u in enumerate(target):
user_results = results[idx].tolist()
for i, metric in enumerate(HOLDOUT_METRICS):
for j, k in enumerate(ks):
score_cumulator[metric][k].update(user_results[i * len(ks) + j])
return score_cumulator | 1,030 | 34.551724 | 112 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/cython/loo.py | import math
from collections import OrderedDict
import numpy as np
from utils.stats import Statistics
try:
from .loo_func import compute_loo
except:
raise ImportError('Cython loo import error')
from .. import LOO_METRICS
# from evaluation.backend import LOO_METRICS
def compute_loo_metrics_cy(pred, target, ks):
score_cumulator = OrderedDict()
for metric in LOO_METRICS:
score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in ks}
results = compute_loo(pred.astype(np.int32), target, 2, np.array(ks))
for idx, u in enumerate(target):
user_results = results[idx].tolist()
for i, metric in enumerate(LOO_METRICS):
for j, k in enumerate(ks):
score_cumulator[metric][k].update(user_results[i * len(ks) + j])
return score_cumulator | 833 | 29.888889 | 84 | py |
RecSys_PyTorch | RecSys_PyTorch-master/evaluation/backend/cython/__init__.py | 0 | 0 | 0 | py | |
RecSys_PyTorch | RecSys_PyTorch-master/experiment/early_stop.py | class EarlyStop:
def __init__(self, early_stop, early_stop_measure):
self.endure = 0
self.early_stop = early_stop
self.early_stop_measure = early_stop_measure
self.best_epoch = None
self.best_score = None
def initialize(self):
self.best_epoch = None
self.best_score = None
def step(self, score, epoch):
# Always continue (shoudl_stop=False) if early_stop is not used
if self.early_stop_measure == 'all':
# Early stop if 'every' measure doesn't improve
# Save individual best score & epoch
if self.best_score is None:
best_score = score
self.best_epoch = {m: epoch for m in best_score}
not_updated = False
else:
not_updated = True
for metric in self.best_score:
if score[metric] > self.best_score[metric]:
self.best_score[metric] = score[metric]
self.best_epoch[metric] = epoch
not_updated = False
else:
# Early stop if specific measure doesn't improve
# Save best score & epoch at the best epoch of the standard measure
if self.best_score is None:
self.best_score = score
self.best_epoch = epoch
not_updated = False
else:
if score[self.early_stop_measure] > self.best_score[self.early_stop_measure]:
self.best_epoch = epoch
self.best_score = score
not_updated = False
else:
not_updated = True
should_stop = False
if not_updated:
self.endure += 1
if self.early_stop and self.endure >= self.early_stop:
should_stop = True
else:
self.endure = 0
should_stop = False
if self.early_stop < 1:
should_stop = False
return not not_updated, should_stop | 2,092 | 35.086207 | 93 | py |
RecSys_PyTorch | RecSys_PyTorch-master/experiment/hparam_search.py | import os
import time
import copy
import optuna
from experiment import fit_model
from utils import ResultTable, set_random_seed
from logger import Logger
class GridSearch:
def __init__(self, model_base, dataset, early_stop, config, device, seed=2020, num_parallel=1):
self.model_base = model_base
self.dataset = dataset
self.early_stop = early_stop
self.metric_to_optimize = early_stop.early_stop_measure
self.config = config
self.seed = seed
self.num_parallel = num_parallel
self.exp_logger = []
self.search_params = config['GridSearch']
self.search_space = self.generate_search_space()
self.search_param_names = list(self.search_params.keys())
# (score, [list of params])
self.result = []
self.valid_score = []
self.exp_num = 0
self.best_exp_num = 0
self.best_score = -1
self.best_params = None
self.device = device
def generate_search_space(self):
search_params = list(self.search_params.keys())
search_space = []
for i, param_name in enumerate(search_params):
new_space = []
if not isinstance(self.search_params[param_name], list):
self.config[param_name] = self.search_params[param_name]
for param_value in self.search_params[param_name]:
if i == 0:
new_space.append({param_name: param_value})
else:
tmp_list = copy.deepcopy(search_space)
for param_setting in tmp_list:
param_setting[param_name] = param_value
new_space += tmp_list
search_space = new_space
return search_space
def optimize(self, model, evaluator, early_stop, neptune, logger, config, fold=None):
valid_score, train_time = fit_model(model, self.dataset, evaluator, early_stop, neptune, logger, config)
self.valid_score.append(valid_score)
score = valid_score[self.metric_to_optimize]
return score
def objective_function(self, cur_space):
base_log_dir = self.base_logger.log_dir
self.exp_num += 1
# update config
config_copy = copy.deepcopy(self.base_config)
config_copy.update_params(cur_space)
# create model
set_random_seed(self.seed)
model = self.model_base(self.dataset, config_copy['Model'], self.device)
exp_logger = self.init_search_logger(base_log_dir, self.exp_num)
config_path = os.path.join(exp_logger.log_dir, 'model_config.cfg')
self.config.save_model_config(config_path)
if self.neptune is not None:
dest = os.path.join(*config_path.split('/')[3:])
self.neptune.log_artifact(config_path, destination=dest)
score = self.optimize(model, self.evaluator, self.early_stop, self.neptune, exp_logger, config_copy, self.fold)
if score > self.best_score:
self.best_score = score
self.best_params = cur_space
self.base_logger.info('Exp %d value=%.4f, current best value=%.4f with parameters %s\n' % (self.exp_num, score, self.best_score, str(self.best_params)))
if self.neptune is not None:
main_log_path = os.path.join(exp_logger.log_dir, 'experiments.log')
dest = os.path.join(*main_log_path.split('/')[3:])
self.neptune.log_artifact(main_log_path, destination=dest)
exp_logger.close()
del model
return score
def init_search_logger(self, base_dir, exp_num):
exp_dir = os.path.join(base_dir, 'exp_%d' % exp_num)
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
logger = Logger(exp_dir)
self.exp_logger.append(logger)
return logger
def init(self):
self.exp_logger = []
self.result = []
self.valid_score = []
self.exp_num = 0
self.best_exp_num = 0
self.best_score = -1
self.best_params = None
def search(self, evaluator, neptune_manager, logger, config, fold=None):
self.result = []
self.neptune = neptune_manager
self.evaluator=evaluator
self.base_logger=logger
self.base_config=config
self.fold=fold
start = time.time()
scores = [self.objective_function(cur_space) for cur_space in self.search_space]
search_time = time.time() - start
results = [
{'number': i, 'value': scores[i], 'params': cur_space}
for i, cur_space in enumerate(self.search_space)]
all_trials = sorted(results, key=lambda x: x['value'], reverse=True)
best_trial = all_trials[0]
self.best_exp_num = best_trial['number']
self.best_score = best_trial['value']
self.best_params = best_trial['params']
search_result_table = ResultTable(table_name='Param Search Result', header=list(self.best_params.keys()) + [self.metric_to_optimize], float_formatter='%.6f')
for trial in all_trials:
row_dict = {}
row_dict[self.metric_to_optimize] = trial['value']
for k, v in trial['params'].items():
row_dict[k] = v
search_result_table.add_row('Exp %d' % trial['number'], row_dict)
return search_result_table, search_time
@ property
def best_result(self):
best_dir = self.exp_logger[self.best_exp_num].log_dir
best_valid_score = self.valid_score[self.best_exp_num]
best_config = copy.deepcopy(self.config)
best_config.update_params(self.best_params)
return best_dir, best_valid_score, best_config['Model']
class BayesianSearch:
    """Bayesian hyper-parameter search driven by Optuna (TPE, maximization).

    Each trial samples one parameter set from ``config['BayesSearch']``,
    trains a fresh model on a deep-copied config and returns the validation
    value of the early-stop metric; the study maximizes that value.
    """

    def __init__(self, model_base, dataset, early_stop, config, device, seed=2020, num_trials=10, num_parallel=1):
        self.model_base = model_base
        self.dataset = dataset
        self.early_stop = early_stop
        self.metric_to_optimize = early_stop.early_stop_measure
        self.config = config
        self.seed = seed
        self.num_trials = num_trials
        self.num_parallel = num_parallel
        self.exp_logger = []
        self.search_params = config['BayesSearch']
        # (score, [list of params])
        self.result = None
        self.valid_score = []
        self.exp_num = 0
        self.best_exp_num = 0
        self.best_score = -1
        # BUG FIX: was `self.best_param` -- every other method (init, search,
        # best_result) reads/writes `self.best_params`.
        self.best_params = None
        self.device = device

    def generate_search_space(self, trial):
        """Sample one value per search parameter from the Optuna trial.

        Supported space specs:
          integer:     ('int', [low, high])
          float:       ('float', 'uniform' | 'loguniform', [low, high])
          categorical: ('categorical', [list of choices])
        """
        search_spaces = {}
        for param_name in self.search_params:
            space = self.search_params[param_name]
            space_type = space[0]
            if space_type == 'categorical':
                search_spaces[param_name] = trial.suggest_categorical(param_name, space[1])
            elif space_type == 'int':
                [low, high] = space[1]
                search_spaces[param_name] = trial.suggest_int(param_name, low, high)
            elif space_type == 'float':
                domain = space[1]
                [low, high] = space[2]
                if domain == 'uniform':
                    search_spaces[param_name] = trial.suggest_uniform(param_name, low, high)
                elif domain == 'loguniform':
                    search_spaces[param_name] = trial.suggest_loguniform(param_name, low, high)
                else:
                    raise ValueError('Unsupported float search domain: %s' % domain)
            else:
                raise ValueError('Search parameter type error: %s' % space_type)
        return search_spaces

    def optimize(self, model, early_stop, logger, config):
        """Train `model` once and return the value of the optimized metric."""
        valid_score, train_time = fit_model(model, self.dataset, self.evaluator, early_stop, None, logger, config)
        self.valid_score.append(valid_score)
        score = valid_score[self.metric_to_optimize]
        return score

    def objective_function(self, trial):
        """Optuna objective: build config and model for this trial, train, score."""
        # Update a copy of the base config with the sampled parameters.
        config_copy = copy.deepcopy(self.config)
        search_params = self.generate_search_space(trial)
        config_copy.update_params(search_params)
        # Create the model under a fixed seed so trials differ only in params.
        set_random_seed(self.seed)
        model = self.model_base(self.dataset, config_copy['Model'], self.device)
        exp_logger = self.init_search_logger()
        if self.neptune is not None:
            config_path = os.path.join(exp_logger.log_dir, 'model_config.cfg')
            self.config.save_model_config(config_path)
            # Neptune destination drops the first path components of log_dir.
            dest = os.path.join(*config_path.split('/')[3:])
            self.neptune.log_artifact(config_path, destination=dest)
        score = self.optimize(model, self.early_stop, exp_logger, config_copy)
        if self.neptune is not None:
            main_log_path = os.path.join(exp_logger.log_dir, 'experiments.log')
            dest = os.path.join(*main_log_path.split('/')[3:])
            self.neptune.log_artifact(main_log_path, destination=dest)
        exp_logger.close()
        self.exp_num += 1
        del model
        return score

    def init_search_logger(self):
        """Create and register a Logger under ``base_dir/exp_<n>`` for this trial."""
        exp_dir = os.path.join(self.base_dir, 'exp_%d' % self.exp_num)
        if not os.path.exists(exp_dir):
            os.mkdir(exp_dir)
        logger = Logger(exp_dir)
        self.exp_logger.append(logger)
        return logger

    def init(self):
        """Reset per-search state so the object can be reused (e.g. per fold)."""
        self.exp_logger = []
        self.result = []
        self.valid_score = []
        self.exp_num = 0
        self.best_exp_num = 0
        self.best_score = -1
        self.best_params = None

    def search(self, evaluator, neptune_manager, logger, config, fold=None):
        """Run the full study; return (result table, wall-clock search time)."""
        self.evaluator = evaluator
        self.base_logger = logger
        self.base_dir = logger.log_dir
        self.neptune = neptune_manager
        self.fold = fold
        start = time.time()
        self.study = optuna.create_study(direction='maximize')
        self.study.optimize(self.objective_function, n_trials=self.num_trials, n_jobs=self.num_parallel)
        search_time = time.time() - start
        # Rank all finished trials by objective value (best first).
        all_trials = sorted(self.study.trials, key=lambda x: x.value, reverse=True)
        best_trial = all_trials[0]
        self.best_exp_num = best_trial.number
        self.best_score = best_trial.value
        self.best_params = best_trial.params
        search_result_table = ResultTable(table_name='Param Search Result', header=list(self.best_params.keys()) + [self.metric_to_optimize], float_formatter='%.6f')
        for trial in all_trials:
            row_dict = {}
            row_dict[self.metric_to_optimize] = trial.value
            for k, v in trial.params.items():
                row_dict[k] = v
            search_result_table.add_row('Exp %d' % trial.number, row_dict)
        if optuna.visualization.is_available():
            # NOTE(review): the returned figure is discarded; presumably meant
            # to be saved or logged -- confirm intended behavior.
            optuna.visualization.plot_optimization_history(self.study)
        return search_result_table, search_time

    @property
    def best_result(self):
        """(log_dir, validation scores, model config) of the best trial."""
        best_dir = self.exp_logger[self.best_exp_num].log_dir
        best_valid_score = self.valid_score[self.best_exp_num]
        best_config = copy.deepcopy(self.config)
        best_config.update_params(self.best_params)
        return best_dir, best_valid_score, best_config['Model']
RecSys_PyTorch | RecSys_PyTorch-master/models/RP3b.py | """
Bibek Paudel et al., Updatable, accurate, diverse, and scalablerecommendations for interactive applications. TiiS 2017.
https://www.zora.uzh.ch/id/eprint/131338/1/TiiS_2016.pdf
Main model codes from https://github.com/MaurizioFD/RecSys2019_DeepLearning_Evaluation
"""
import torch
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import normalize
from tqdm import tqdm
from models.BaseModel import BaseModel
class RP3b(BaseModel):
    """RP3beta graph-based recommender (Paudel et al., TiiS 2017).

    Builds an item-item weight matrix from 3-step random-walk transition
    probabilities (user -> item -> user -> item), raises transition
    probabilities to ``alpha``, divides each target item's score by its
    popularity raised to ``beta``, and keeps only the ``topk`` strongest
    neighbors per item.
    """
    def __init__(self, dataset, hparams, device):
        super(RP3b, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.topk = hparams['topk']    # neighbors kept per item
        self.alpha = hparams['alpha']  # exponent on transition probabilities
        self.beta = hparams['beta']    # popularity-penalty exponent
    def fit_knn(self, train_matrix, block_dim=200): pass  # (placeholder removed)
    def fit_rp3b(self, train_matrix, block_dim=200):
        """Build the sparse item-item matrix ``self.W_sparse``.

        Items are processed in blocks of ``block_dim`` rows to keep the dense
        intermediate (block_dim x num_items) bounded.
        """
        num_items = train_matrix.shape[1]
        # Row-normalized user->item transition probabilities.
        Pui = normalize(train_matrix, norm='l1', axis=1)
        # Piu is the column-normalized, "boolean" urm transposed
        X_bool = train_matrix.transpose(copy=True)
        X_bool.data = np.ones(X_bool.data.size, np.float32)
        # Taking the degree of each item to penalize top popular
        # Some rows might be zero, make sure their degree remains zero
        X_bool_sum = np.array(X_bool.sum(axis=1)).ravel()
        degree = np.zeros(train_matrix.shape[1])
        nonZeroMask = X_bool_sum != 0.0
        degree[nonZeroMask] = np.power(X_bool_sum[nonZeroMask], -self.beta)
        # Row-normalized item->user transition probabilities.
        Piu = normalize(X_bool, norm='l1', axis=1)
        del(X_bool)
        if self.alpha != 1:
            Pui = Pui.power(self.alpha)
            Piu = Piu.power(self.alpha)
        # Use array as it reduces memory requirements compared to lists
        dataBlock = 10000000
        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)
        numCells = 0
        item_blocks = range(0, num_items, block_dim)
        tqdm_iterator = tqdm(item_blocks, desc='# items blocks covered', total=len(item_blocks))
        for cur_items_start_idx in tqdm_iterator:
            # Shrink the last block so it does not run past num_items.
            if cur_items_start_idx + block_dim > num_items:
                block_dim = num_items - cur_items_start_idx
            # second * third transition matrix: # of distinct paths from item to item
            # block_dim x item
            Piui = Piu[cur_items_start_idx:cur_items_start_idx + block_dim, :] * Pui
            Piui = Piui.toarray()
            for row_in_block in range(block_dim):
                # Apply the popularity penalty, then delete the self connection.
                row_data = np.multiply( Piui[row_in_block, :], degree)
                row_data[cur_items_start_idx + row_in_block] = 0
                # Top-k items
                best = row_data.argsort()[::-1][:self.topk]
                # add non-zero top-k path only (efficient)
                notZerosMask = row_data[best] != 0.0
                values_to_add = row_data[best][notZerosMask]
                cols_to_add = best[notZerosMask]
                for index in range(len(values_to_add)):
                    # Grow the COO buffers in dataBlock-sized chunks when full.
                    if numCells == len(rows):
                        rows = np.concatenate((rows, np.zeros(dataBlock, dtype=np.int32)))
                        cols = np.concatenate((cols, np.zeros(dataBlock, dtype=np.int32)))
                        values = np.concatenate((values, np.zeros(dataBlock, dtype=np.float32)))
                    rows[numCells] = cur_items_start_idx + row_in_block
                    cols[numCells] = cols_to_add[index]
                    values[numCells] = values_to_add[index]
                    numCells += 1
        self.W_sparse = sp.csr_matrix((values[:numCells], (rows[:numCells], cols[:numCells])), shape=(Pui.shape[1], Pui.shape[1]))
    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """One-shot fit: build W_sparse, report a diagnostic BCE reconstruction loss."""
        train_matrix = dataset.train_data
        self.fit_rp3b(train_matrix.tocsc())
        output = train_matrix @ self.W_sparse
        # Diagnostic only: BCE between the binary input and its reconstruction.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output.toarray()))
        if evaluator is not None:
            scores = evaluator.evaluate(self)
        else:
            scores = None
        if loggers is not None:
            if evaluator is not None:
                for logger in loggers:
                    logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}
    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the rows of ``eval_pos``; seen items get -inf."""
        # eval_pos_matrix
        preds = (eval_pos * self.W_sparse).toarray()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/PureSVD.py | import numpy as np
import scipy.sparse as sp
from sklearn.utils.extmath import randomized_svd
import torch
import torch.nn.functional as F
from models.BaseModel import BaseModel
class PureSVD(BaseModel):
    """Truncated-SVD baseline recommender.

    Factorizes the binary training matrix once with randomized SVD and scores
    every user-item pair as the dot product of the resulting factors.
    """

    def __init__(self, dataset, hparams, device):
        super(PureSVD, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.num_factors = hparams['num_factors']
        self.device = device

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Factorize the training matrix; report a diagnostic BCE loss and scores."""
        dense_train = dataset.train_data.toarray()
        U, sigma, Vt = randomized_svd(dense_train, n_components=self.num_factors, random_state=123)
        # Fold the singular values into the item side: item factors = (S @ Vt).T
        s_Vt = sp.diags(sigma) * Vt
        self.user_embedding = U
        self.item_embedding = s_Vt.T
        reconstruction = self.user_embedding @ self.item_embedding.T
        # Diagnostic only: BCE between the binary input and its reconstruction.
        loss = F.binary_cross_entropy(torch.tensor(dense_train), torch.tensor(reconstruction))
        scores = evaluator.evaluate(self) if evaluator is not None else None
        if loggers is not None and evaluator is not None:
            for logger in loggers:
                logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}

    def predict_batch_users(self, user_ids):
        """Dense scores of a user batch against every item."""
        return self.user_embedding[user_ids] @ self.item_embedding.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Fill a full prediction matrix batch by batch; seen items get -inf."""
        pred_matrix = np.zeros(eval_pos.shape)
        num_eval_users = len(eval_users)
        for start in range(0, num_eval_users, test_batch_size):
            batch_users = eval_users[start:start + test_batch_size]
            pred_matrix[batch_users] = self.predict_batch_users(batch_users)
        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix
RecSys_PyTorch | RecSys_PyTorch-master/models/ItemKNN.py | """
Jun Wang et al., Unifying user-based and item-based collaborative filtering approaches by similarity fusion. SIGIR 2006.
http://web4.cs.ucl.ac.uk/staff/jun.wang/papers/2006-sigir06-unifycf.pdf
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from tqdm import tqdm
from models.BaseModel import BaseModel
class ItemKNN(BaseModel):
    """Item-based k-nearest-neighbor recommender with shrunk cosine similarity.

    Builds a sparse item-item similarity matrix W (top ``topk`` neighbors per
    item) and scores a user by ``user_profile @ W``.  Raw feedback can be
    re-weighted with TF-IDF or Okapi BM25 first (``feature_weighting``).
    """

    def __init__(self, dataset, hparams, device):
        super(ItemKNN, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.topk = hparams['topk']      # neighbors kept per item
        self.shrink = hparams['shrink']  # shrinkage added to the cosine denominator
        self.feature_weighting = hparams['feature_weighting']
        assert self.feature_weighting in ['tf-idf', 'bm25', 'none']

    def fit_knn(self, train_matrix, block_size=500):
        """Compute the top-k shrunk-cosine item-item matrix ``self.W_sparse``.

        Items are processed in column blocks of ``block_size`` so the dense
        intermediate stays (num_users x block_size).
        BUG FIX: the original re-assigned ``block_size = 500`` inside the body,
        silently ignoring the caller-supplied parameter; it also squeezed
        ``item_data`` to 1-D, which broke blocks of exactly one column.
        """
        if self.feature_weighting == 'tf-idf':
            train_matrix = self.TF_IDF(train_matrix.T).T
        elif self.feature_weighting == 'bm25':
            train_matrix = self.okapi_BM25(train_matrix.T).T
        train_matrix = train_matrix.tocsc()
        num_items = train_matrix.shape[1]
        start_col_block = 0
        end_col_local = num_items
        # Per-item L2 norms for the cosine denominator.
        sumOfSquared = np.array(train_matrix.power(2).sum(axis=0)).ravel()
        sumOfSquared = np.sqrt(sumOfSquared)
        values = []
        rows = []
        cols = []
        while start_col_block < end_col_local:
            end_col_block = min(start_col_block + block_size, end_col_local)
            this_block_size = end_col_block - start_col_block
            # Dense (num_users x this_block_size) slice of the current item block.
            item_data = train_matrix[:, start_col_block:end_col_block].toarray()
            # Co-rating weights between every item and the items of this block.
            this_block_weights = train_matrix.T.dot(item_data)
            for col_index_in_block in range(this_block_size):
                # Similarity column between one block item and all items.
                this_column_weights = this_block_weights[:, col_index_in_block]
                # columnIndex = global item index; zero out self-similarity.
                columnIndex = col_index_in_block + start_col_block
                this_column_weights[columnIndex] = 0.0
                # Shrunk cosine: ||i|| * ||j|| + shrink + eps in the denominator.
                denominator = sumOfSquared[columnIndex] * sumOfSquared + self.shrink + 1e-6
                this_column_weights = np.multiply(this_column_weights, 1 / denominator)
                # Top-k: argpartition the full column, then sort only the candidates.
                relevant_items_partition = (-this_column_weights).argpartition(self.topk - 1)[0:self.topk]
                relevant_items_partition_sorting = np.argsort(-this_column_weights[relevant_items_partition])
                top_k_idx = relevant_items_partition[relevant_items_partition_sorting]
                # Incrementally build sparse matrix, do not add zeros
                notZerosMask = this_column_weights[top_k_idx] != 0.0
                numNotZeros = np.sum(notZerosMask)
                values.extend(this_column_weights[top_k_idx][notZerosMask])
                rows.extend(top_k_idx[notZerosMask])
                cols.extend(np.ones(numNotZeros) * columnIndex)
            start_col_block += block_size
        self.W_sparse = sp.csr_matrix((values, (rows, cols)),
                                      shape=(num_items, num_items),
                                      dtype=np.float32)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """One-shot fit: build W_sparse, report a diagnostic BCE reconstruction loss."""
        train_matrix = dataset.train_data
        self.fit_knn(train_matrix)
        output = train_matrix @ self.W_sparse
        # Diagnostic only: BCE between the binary input and its reconstruction.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output.toarray()))
        scores = evaluator.evaluate(self) if evaluator is not None else None
        if loggers is not None and evaluator is not None:
            for logger in loggers:
                logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the rows of ``eval_pos``; seen items get -inf."""
        input_matrix = eval_pos.toarray()
        preds = np.zeros_like(input_matrix)
        num_data = input_matrix.shape[0]
        num_batches = int(np.ceil(num_data / test_batch_size))
        perm = list(range(num_data))
        for b in range(num_batches):
            if (b + 1) * test_batch_size >= num_data:
                batch_idx = perm[b * test_batch_size:]
            else:
                batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
            test_batch_matrix = input_matrix[batch_idx]
            batch_pred_matrix = (test_batch_matrix @ self.W_sparse)
            preds[batch_idx] = batch_pred_matrix
        preds[eval_pos.nonzero()] = float('-inf')
        return preds

    def okapi_BM25(self, rating_matrix, K1=1.2, B=0.75):
        """Re-weight each entry of ``rating_matrix`` with Okapi BM25 (rows = docs)."""
        assert B>0 and B<1, "okapi_BM_25: B must be in (0,1)"
        assert K1>0, "okapi_BM_25: K1 must be > 0"
        # calculate idf per term (user)
        rating_matrix = sp.coo_matrix(rating_matrix)
        N = float(rating_matrix.shape[0])
        idf = np.log(N / (1 + np.bincount(rating_matrix.col)))
        # calculate length_norm per document
        row_sums = np.ravel(rating_matrix.sum(axis=1))
        average_length = row_sums.mean()
        length_norm = (1.0 - B) + B * row_sums / average_length
        # weight matrix rows by bm25
        rating_matrix.data = rating_matrix.data * (K1 + 1.0) / (K1 * length_norm[rating_matrix.row] + rating_matrix.data) * idf[rating_matrix.col]
        return rating_matrix.tocsr()

    def TF_IDF(self, rating_matrix):
        """Re-weight each entry with sqrt-TF * IDF (items assumed on rows)."""
        rating_matrix = sp.coo_matrix(rating_matrix)
        N = float(rating_matrix.shape[0])
        # calculate IDF
        idf = np.log(N / (1 + np.bincount(rating_matrix.col)))
        # apply TF-IDF adjustment
        rating_matrix.data = np.sqrt(rating_matrix.data) * idf[rating_matrix.col]
        return rating_matrix.tocsr()
RecSys_PyTorch | RecSys_PyTorch-master/models/MultVAE.py | """
Dawen Liang et al., Variational Autoencoders for Collaborative Filtering. WWW 2018.
https://arxiv.org/pdf/1802.05814
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .BaseModel import BaseModel
from data.generators import MatrixGenerator
class MultVAE(BaseModel):
    """Variational autoencoder for collaborative filtering (Liang et al., WWW 2018).
    The KL regularizer is annealed linearly from 0 up to ``anneal_cap`` over
    ``total_anneal_steps`` optimizer updates.
    """
    def __init__(self, dataset, hparams, device):
        super(MultVAE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        # NOTE(review): eval() on a config string -- assumes config files are trusted.
        if isinstance(hparams['enc_dims'], str):
            hparams['enc_dims'] = eval(hparams['enc_dims'])
        self.enc_dims = [self.num_items] + list(hparams['enc_dims'])
        self.dec_dims = self.enc_dims[::-1]
        self.dims = self.enc_dims + self.dec_dims[1:]
        self.total_anneal_steps = hparams['total_anneal_steps']
        self.anneal_cap = hparams['anneal_cap']
        self.dropout = hparams['dropout']
        self.eps = 1e-6
        self.anneal = 0.
        self.update_count = 0
        self.device = device
        self.encoder = nn.ModuleList()
        # The last encoder layer outputs [mu, logvar] jointly, hence width * 2.
        for i, (d_in, d_out) in enumerate(zip(self.enc_dims[:-1], self.enc_dims[1:])):
            if i == len(self.enc_dims[:-1]) - 1:
                d_out *= 2
            self.encoder.append(nn.Linear(d_in, d_out))
            if i != len(self.enc_dims[:-1]) - 1:
                self.encoder.append(nn.Tanh())
        self.decoder = nn.ModuleList()
        for i, (d_in, d_out) in enumerate(zip(self.dec_dims[:-1], self.dec_dims[1:])):
            self.decoder.append(nn.Linear(d_in, d_out))
            if i != len(self.dec_dims[:-1]) - 1:
                self.decoder.append(nn.Tanh())
        self.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
    def forward(self, rating_matrix):
        """Encode, reparameterize and decode a batch of rating rows.
        Returns (logits, kl_loss) in training mode, logits only in eval mode.
        """
        # encoder: L2-normalized input with dropout corruption (training only)
        h = F.dropout(F.normalize(rating_matrix), p=self.dropout, training=self.training)
        for layer in self.encoder:
            h = layer(h)
        # split the last-layer output into mean and log-variance
        mu_q = h[:, :self.enc_dims[-1]]
        logvar_q = h[:, self.enc_dims[-1]:]  # log sigma^2, (batch, latent_dim)
        std_q = torch.exp(0.5 * logvar_q)  # sigma, (batch, latent_dim)
        # Reparameterization trick; `self.training` (bool -> 0/1) disables the
        # noise term at eval time.  Noise is drawn with std 0.01, not 1.
        epsilon = torch.zeros_like(std_q).normal_(mean=0, std=0.01)
        sampled_z = mu_q + self.training * epsilon * std_q
        output = sampled_z
        for layer in self.decoder:
            output = layer(output)
        if self.training:
            # KL( q(z|x) || N(0, I) ), summed over latent dims, batch-averaged.
            kl_loss = ((0.5 * (-logvar_q + torch.exp(logvar_q) + torch.pow(mu_q, 2) - 1)).sum(1)).mean()
            return output, kl_loss
        else:
            return output
    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with annealed ELBO; returns best validation scores.
        NOTE(review): assumes ``evaluator`` is provided -- with evaluator=None
        and early_stop=None the final `scores` reference is unbound.
        """
        # user, item, rating pairs
        train_matrix = dataset.train_data
        num_training = train_matrix.shape[0]
        num_batches = int(np.ceil(num_training / exp_config.batch_size))
        batch_generator = MatrixGenerator(train_matrix, batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, batch_matrix in enumerate(batch_generator):
                self.optimizer.zero_grad()
                # Linear KL annealing schedule, capped at anneal_cap.
                if self.total_anneal_steps > 0:
                    self.anneal = min(self.anneal_cap, 1. * self.update_count / self.total_anneal_steps)
                else:
                    self.anneal = self.anneal_cap
                pred_matrix, kl_loss = self.forward(batch_matrix)
                # Multinomial-style reconstruction term: per-user BCE summed
                # over items, averaged over the batch.
                ce_loss = F.binary_cross_entropy_with_logits(pred_matrix, batch_matrix, reduction='none').sum(1).mean()
                batch_loss = ce_loss + kl_loss * self.anneal
                batch_loss.backward()
                self.optimizer.step()
                self.update_count += 1
                epoch_loss += batch_loss
                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}
            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}
    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the rows of ``eval_pos``; seen items get -inf."""
        with torch.no_grad():
            input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)
            preds = np.zeros(eval_pos.shape)
            num_data = input_matrix.shape[0]
            num_batches = int(np.ceil(num_data / test_batch_size))
            perm = list(range(num_data))
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_data:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
                test_batch_matrix = input_matrix[batch_idx]
                batch_pred_matrix = self.forward(test_batch_matrix)
                preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/P3a.py | """
Colin Cooper et al., Random Walks in Recommender Systems: Exact Computation and Simulations. WWW 2014.
http://wwwconference.org/proceedings/www2014/companion/p811.pdf
Main model codes from https://github.com/MaurizioFD/RecSys2019_DeepLearning_Evaluation
"""
import torch
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import normalize
from tqdm import tqdm
from models.BaseModel import BaseModel
class P3a(BaseModel):
    """P3alpha random-walk recommender (Cooper et al., WWW 2014).

    Builds an item-item weight matrix from 3-step random-walk transition
    probabilities (user -> item -> user -> item) raised to ``alpha``,
    keeping only the ``topk`` strongest neighbors per item.
    """
    def __init__(self, dataset, hparams, device):
        super(P3a, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.topk = hparams['topk']    # neighbors kept per item
        self.alpha = hparams['alpha']  # exponent on transition probabilities
    def fit_p3a(self, train_matrix, block_dim=200):
        """Build the sparse item-item matrix ``self.W_sparse``.

        Items are processed in blocks of ``block_dim`` rows to keep the dense
        intermediate (block_dim x num_items) bounded.
        """
        num_items = train_matrix.shape[1]
        # (user, item), 1 / # user
        Pui = normalize(train_matrix, norm='l1', axis=1)
        X_bool = train_matrix.transpose(copy=True)
        X_bool.data = np.ones(X_bool.data.size, np.float32)
        # (item, user), 1 / # item
        Piu = normalize(X_bool, norm='l1', axis=1)
        del(X_bool)
        if self.alpha != 1:
            Pui = Pui.power(self.alpha)
            Piu = Piu.power(self.alpha)
        # Use array as it reduces memory requirements compared to lists
        dataBlock = 10000000
        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)
        numCells = 0
        item_blocks = range(0, num_items, block_dim)
        tqdm_iterator = tqdm(item_blocks, desc='# items blocks covered', total=len(item_blocks))
        for cur_items_start_idx in tqdm_iterator:
            # Shrink the last block so it does not run past num_items.
            if cur_items_start_idx + block_dim > num_items:
                block_dim = num_items - cur_items_start_idx
            # second * third transition matrix: # of distinct paths from item to item
            # block_dim x item
            Piui = Piu[cur_items_start_idx:cur_items_start_idx + block_dim, :] * Pui
            Piui = Piui.toarray()
            for row_in_block in range(block_dim):
                # Delete self connection
                row_data = Piui[row_in_block, :]
                row_data[cur_items_start_idx + row_in_block] = 0
                # Top-k items
                best = row_data.argsort()[::-1][:self.topk]
                # add non-zero top-k path only (efficient)
                notZerosMask = row_data[best] != 0.0
                values_to_add = row_data[best][notZerosMask]
                cols_to_add = best[notZerosMask]
                for index in range(len(values_to_add)):
                    # Grow the COO buffers in dataBlock-sized chunks when full.
                    if numCells == len(rows):
                        rows = np.concatenate((rows, np.zeros(dataBlock, dtype=np.int32)))
                        cols = np.concatenate((cols, np.zeros(dataBlock, dtype=np.int32)))
                        values = np.concatenate((values, np.zeros(dataBlock, dtype=np.float32)))
                    rows[numCells] = cur_items_start_idx + row_in_block
                    cols[numCells] = cols_to_add[index]
                    values[numCells] = values_to_add[index]
                    numCells += 1
        self.W_sparse = sp.csr_matrix((values[:numCells], (rows[:numCells], cols[:numCells])), shape=(Pui.shape[1], Pui.shape[1]))
    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """One-shot fit: build W_sparse, report a diagnostic BCE reconstruction loss."""
        train_matrix = dataset.train_data
        self.fit_p3a(train_matrix.tocsc())
        output = train_matrix @ self.W_sparse
        # Diagnostic only: BCE between the binary input and its reconstruction.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output.toarray()))
        if evaluator is not None:
            scores = evaluator.evaluate(self)
        else:
            scores = None
        if loggers is not None:
            if evaluator is not None:
                for logger in loggers:
                    logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}
    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the rows of ``eval_pos``; seen items get -inf."""
        input_matrix = eval_pos.toarray()
        preds = np.zeros_like(input_matrix)
        num_data = input_matrix.shape[0]
        num_batches = int(np.ceil(num_data / test_batch_size))
        perm = list(range(num_data))
        for b in range(num_batches):
            if (b + 1) * test_batch_size >= num_data:
                batch_idx = perm[b * test_batch_size:]
            else:
                batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
            test_batch_matrix = input_matrix[batch_idx]
            batch_pred_matrix = (test_batch_matrix @ self.W_sparse)
            preds[batch_idx] = batch_pred_matrix
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/CDAE.py | """
Yao Wu et al., Collaborative denoising auto-encoders for top-n recommender systems. WSDM 2016.
https://alicezheng.org/papers/wsdm16-cdae.pdf
"""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import MatrixGenerator
class CDAE(BaseModel):
    """Collaborative Denoising AutoEncoder (Wu et al., WSDM 2016).

    A one-hidden-layer denoising autoencoder over a user's rating row, plus a
    per-user embedding added to the hidden layer (the "C" in CDAE).
    """
    def __init__(self, dataset, hparams, device):
        super(CDAE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.hidden_dim = hparams['hidden_dim']
        # NOTE(review): `act` is stored but never read; activations are
        # hard-coded to tanh (hidden) / sigmoid (output).
        self.act = hparams['act']
        self.corruption_ratio = hparams['corruption_ratio']
        self.device = device
        self.user_embedding = nn.Embedding(self.num_users, self.hidden_dim)
        self.encoder = nn.Linear(self.num_items, self.hidden_dim)
        self.decoder = nn.Linear(self.hidden_dim, self.num_items)
        self.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
    def forward(self, user_id, rating_matrix):
        """Reconstruct a batch of rating rows for the given user ids."""
        # corruption (dropout is active only while self.training is True)
        rating_matrix = F.dropout(rating_matrix, self.corruption_ratio, training=self.training)
        # AE: hidden layer gets the per-user bias embedding added in
        enc = torch.tanh(self.encoder(rating_matrix) + self.user_embedding(user_id))
        dec = self.decoder(enc)
        return torch.sigmoid(dec)
    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with full rating rows as both input and target.
        NOTE(review): assumes ``evaluator`` is provided -- with evaluator=None
        and early_stop=None the final `scores` reference is unbound.
        """
        train_matrix = dataset.train_data
        num_training = train_matrix.shape[0]
        num_batches = int(np.ceil(num_training / exp_config.batch_size))
        # return_index=True so the generator also yields the user ids of each row.
        batch_generator = MatrixGenerator(train_matrix, return_index=True, batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, (batch_matrix, batch_users) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                pred_matrix = self.forward(batch_users, batch_matrix)
                # Per-user BCE summed over items, averaged over the batch.
                batch_loss = F.binary_cross_entropy(pred_matrix, batch_matrix, reduction='none').sum(1).mean()
                batch_loss.backward()
                self.optimizer.step()
                epoch_loss += batch_loss
                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}
            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}
    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the rows of ``eval_pos``; seen items get -inf."""
        with torch.no_grad():
            input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)
            preds = np.zeros(shape=input_matrix.shape)
            num_data = input_matrix.shape[0]
            num_batches = int(np.ceil(num_data / test_batch_size))
            perm = list(range(num_data))
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_data:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
                test_batch_matrix = input_matrix[batch_idx]
                batch_idx_tensor = torch.LongTensor(batch_idx).to(self.device)
                batch_pred_matrix = self.forward(batch_idx_tensor, test_batch_matrix)
                preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/DAE.py | """
Yao Wu et al., Collaborative denoising auto-encoders for top-n recommender systems. WSDM 2016.
https://alicezheng.org/papers/wsdm16-cdae.pdf
"""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import MatrixGenerator
class DAE(BaseModel):
    """Denoising AutoEncoder recommender (CDAE without the per-user embedding).

    The binary rating row is corrupted with dropout, encoded through a single
    tanh layer of size ``hidden_dim`` and decoded with a sigmoid output,
    trained with binary cross-entropy against the uncorrupted input.
    """

    def __init__(self, dataset, hparams, device):
        super(DAE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.hidden_dim = hparams['hidden_dim']
        # NOTE(review): `act` is stored but never read; activations are
        # hard-coded to tanh (hidden) / sigmoid (output).
        self.act = hparams['act']
        self.corruption_ratio = hparams['corruption_ratio']
        self.device = device
        self.encoder = nn.Linear(self.num_items, self.hidden_dim)
        self.decoder = nn.Linear(self.hidden_dim, self.num_items)
        self.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def forward(self, rating_matrix):
        """Reconstruct a batch of rating rows (corruption active in training only)."""
        # corruption
        rating_matrix = F.dropout(rating_matrix, self.corruption_ratio, training=self.training)
        # AE
        enc = torch.tanh(self.encoder(rating_matrix))
        dec = self.decoder(enc)
        return torch.sigmoid(dec)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with full rating rows as both input and target; returns best scores."""
        # user, item, rating pairs
        train_matrix = dataset.train_data
        num_training = train_matrix.shape[0]
        num_batches = int(np.ceil(num_training / exp_config.batch_size))
        batch_generator = MatrixGenerator(train_matrix, batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        # BUG FIX: initialize so the final return does not raise NameError
        # when no evaluator is supplied.
        scores = None
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, batch_matrix in enumerate(batch_generator):
                self.optimizer.zero_grad()
                pred_matrix = self.forward(batch_matrix)
                # Per-user BCE summed over items, averaged over the batch.
                batch_loss = F.binary_cross_entropy(pred_matrix, batch_matrix, reduction='none').sum(1).mean()
                batch_loss.backward()
                self.optimizer.step()
                epoch_loss += batch_loss
                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}
            # Periodic evaluation + early stopping.
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the rows of ``eval_pos``; seen items get -inf.

        NOTE(review): caller is expected to have put the model in eval() mode,
        otherwise the dropout corruption stays active during prediction.
        """
        with torch.no_grad():
            input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)
            preds = np.zeros(eval_pos.shape)
            num_data = input_matrix.shape[0]
            num_batches = int(np.ceil(num_data / test_batch_size))
            perm = list(range(num_data))
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_data:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
                test_batch_matrix = input_matrix[batch_idx]
                batch_pred_matrix = self.forward(test_batch_matrix)
                # Consistency fix: plain assignment (each row is visited once);
                # the original `+=` was equivalent here but misleading.
                preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/LightGCN.py | """
LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation,
Xiangnan He et al.,
SIGIR 2020.
"""
import os
import math
import time
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import PairwiseGenerator
class LightGCN(BaseModel):
def __init__(self, dataset, hparams, device):
super(LightGCN, self).__init__()
self.data_name = dataset.dataname
self.num_users = dataset.num_users
self.num_items = dataset.num_items
self.emb_dim = hparams['emb_dim']
self.num_layers = hparams['num_layers']
self.node_dropout = hparams['node_dropout']
self.split = hparams['split']
self.num_folds = hparams['num_folds']
self.reg = hparams['reg']
self.Graph = None
self.data_loader = None
self.path = hparams['graph_dir']
if not os.path.exists(self.path):
os.mkdir(self.path)
self.device = device
self.build_graph()
self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
def build_graph(self):
self.user_embedding = nn.Embedding(self.num_users, self.emb_dim)
self.item_embedding = nn.Embedding(self.num_items, self.emb_dim)
nn.init.normal_(self.user_embedding.weight, 0, 0.01)
nn.init.normal_(self.item_embedding.weight, 0, 0.01)
self.user_embedding_pred = None
self.item_embedding_pred = None
self.to(self.device)
def update_lightgcn_embedding(self):
self.user_embeddings, self.item_embeddings = self._lightgcn_embedding(self.Graph)
def forward(self, user_ids, item_ids):
user_emb = F.embedding(user_ids, self.user_embeddings)
item_emb = F.embedding(item_ids, self.item_embeddings)
pred_rating = torch.sum(torch.mul(user_emb, item_emb), 1)
return pred_rating
def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
train_matrix = dataset.train_data
self.Graph = self.getSparseGraph(train_matrix)
batch_generator = PairwiseGenerator(
train_matrix, num_negatives=1, num_positives_per_user=1,
batch_size=exp_config.batch_size, shuffle=True, device=self.device)
num_batches = len(batch_generator)
for epoch in range(1, exp_config.num_epochs + 1):
self.train()
epoch_loss = 0.0
for b, (batch_users, batch_pos, batch_neg) in enumerate(batch_generator):
self.optimizer.zero_grad()
batch_loss = self.process_one_batch(batch_users, batch_pos, batch_neg)
batch_loss.backward()
self.optimizer.step()
epoch_loss += batch_loss
if exp_config.verbose and b % 50 == 0:
print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
epoch_summary = {'loss': epoch_loss}
# Evaluate if necessary
if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
scores = evaluator.evaluate(self)
epoch_summary.update(scores)
if loggers is not None:
for logger in loggers:
logger.log_metrics(epoch_summary, epoch=epoch)
## Check early stop
if early_stop is not None:
is_update, should_stop = early_stop.step(scores, epoch)
if should_stop:
break
else:
if loggers is not None:
for logger in loggers:
logger.log_metrics(epoch_summary, epoch=epoch)
best_score = early_stop.best_score if early_stop is not None else scores
return {'scores': best_score}
def process_one_batch(self, users, pos_items, neg_items):
self.update_lightgcn_embedding()
pos_scores = self.forward(users, pos_items)
neg_scores = self.forward(users, neg_items)
loss = -F.sigmoid(pos_scores - neg_scores).log().mean()
return loss
def predict_batch_users(self, user_ids):
user_embeddings = F.embedding(user_ids, self.user_embeddings)
item_embeddings = self.item_embeddings
return user_embeddings @ item_embeddings.T
def predict(self, eval_users, eval_pos, test_batch_size):
self.update_lightgcn_embedding()
num_eval_users = len(eval_users)
num_batches = int(np.ceil(num_eval_users / test_batch_size))
pred_matrix = np.zeros(eval_pos.shape)
perm = list(range(num_eval_users))
with torch.no_grad():
for b in range(num_batches):
if (b + 1) * test_batch_size >= num_eval_users:
batch_idx = perm[b * test_batch_size:]
else:
batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
batch_users = eval_users[batch_idx]
batch_users_torch = torch.LongTensor(batch_users).to(self.device)
pred_matrix[batch_users] = self.predict_batch_users(batch_users_torch).detach().cpu().numpy()
pred_matrix[eval_pos.nonzero()] = float('-inf')
return pred_matrix
##################################### LightGCN Code
    def __dropout_x(self, x, keep_prob):
        # Edge dropout on one sparse COO tensor: each nonzero entry survives
        # with probability `keep_prob`, and surviving values are rescaled by
        # 1/keep_prob so the matrix is unbiased in expectation.
        size = x.size()
        index = x.indices().t()
        values = x.values()
        # rand in [0,1); rand + keep_prob >= 1 with probability keep_prob,
        # so int() truncation followed by bool() yields the keep-mask.
        random_index = torch.rand(len(values)) + keep_prob
        random_index = random_index.int().bool()
        # NOTE(review): the mask is created on CPU; if `values` lives on a
        # CUDA device this boolean indexing likely fails — confirm and pass
        # device=values.device to torch.rand if so.
        index = index[random_index]
        values = values[random_index]/keep_prob
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g
def __dropout(self, keep_prob):
if self.split:
graph = []
for g in self.Graph:
graph.append(self.__dropout_x(g, keep_prob))
else:
graph = self.__dropout_x(self.Graph, keep_prob)
return graph
    def _lightgcn_embedding(self, graph):
        """Propagate the ego embeddings through `num_layers` rounds of sparse
        graph convolution and return the layer-averaged (user, item)
        embeddings — LightGCN propagation: no feature transform, no
        nonlinearity, mean over all layer outputs (layer 0 included).
        """
        users_emb = self.user_embedding.weight
        items_emb = self.item_embedding.weight
        # Stack users then items into one (num_users + num_items, dim) table.
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]  # layer-0 embeddings
        if self.node_dropout > 0:
            if self.training:
                # NOTE(review): __dropout is declared with a single
                # `keep_prob` parameter, so this two-argument call raises
                # TypeError when node dropout is active — the signatures
                # need to be aligned.
                g_droped = self.__dropout(graph, self.node_dropout)
            else:
                g_droped = graph
        else:
            g_droped = graph
        for layer in range(self.num_layers):
            if self.split:
                # Folded graph: multiply each row-fold separately, then
                # re-assemble in order.
                temp_emb = []
                for f in range(len(g_droped)):
                    temp_emb.append(torch.sparse.mm(g_droped[f], all_emb))
                side_emb = torch.cat(temp_emb, dim=0)
                all_emb = side_emb
            else:
                all_emb = torch.sparse.mm(g_droped, all_emb)
            embs.append(all_emb)
        # (N, num_layers+1, dim) -> mean over the layer axis.
        embs = torch.stack(embs, dim=1)
        light_out = torch.mean(embs, dim=1)
        users, items = torch.split(light_out, [self.num_users, self.num_items])
        return users, items
def make_train_matrix(self):
train_matrix_arr = self.dataset.train_matrix.toarray()
self.train_matrix = sp.csr_matrix(train_matrix_arr)
def _split_A_hat(self, A):
A_fold = []
fold_len = (self.num_users + self.num_items) // self.num_folds
for i_fold in range(self.num_folds):
start = i_fold*fold_len
if i_fold == self.num_folds - 1:
end = self.num_users + self.num_items
else:
end = (i_fold + 1) * fold_len
A_fold.append(self._convert_sp_mat_to_sp_tensor(A[start:end]).coalesce().to(self.device))
return A_fold
def _convert_sp_mat_to_sp_tensor(self, X):
coo = X.tocoo().astype(np.float32)
row = torch.Tensor(coo.row).long()
col = torch.Tensor(coo.col).long()
index = torch.stack([row, col])
data = torch.FloatTensor(coo.data)
return torch.sparse.FloatTensor(index, data, torch.Size(coo.shape))
def getSparseGraph(self, rating_matrix):
n_users, n_items = rating_matrix.shape
print("loading adjacency matrix")
filename = f'{self.data_name}_s_pre_adj_mat.npz'
try:
pre_adj_mat = sp.load_npz(os.path.join(self.path, filename))
print("successfully loaded...")
norm_adj = pre_adj_mat
except :
print("generating adjacency matrix")
s = time.time()
adj_mat = sp.dok_matrix((n_users + n_items, n_users + n_items), dtype=np.float32)
adj_mat = adj_mat.tolil()
R = rating_matrix.tolil()
adj_mat[:n_users, n_users:] = R
adj_mat[n_users:, :n_users] = R.T
adj_mat = adj_mat.todok()
# adj_mat = adj_mat + sp.eye(adj_mat.shape[0])
rowsum = np.array(adj_mat.sum(axis=1))
d_inv = np.power(rowsum, -0.5).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat = sp.diags(d_inv)
norm_adj = d_mat.dot(adj_mat)
norm_adj = norm_adj.dot(d_mat)
norm_adj = norm_adj.tocsr()
end = time.time()
print(f"costing {end-s}s, saved norm_mat...")
sp.save_npz(os.path.join(self.path, filename), norm_adj)
if self.split == True:
Graph = self._split_A_hat(norm_adj)
print("done split matrix")
else:
Graph = self._convert_sp_mat_to_sp_tensor(norm_adj)
Graph = Graph.coalesce().to(self.device)
print("don't split the matrix")
return Graph | 9,931 | 36.198502 | 109 | py |
"""
Neural Graph Collaborative Filtering,
Xiang Wang et al.,
SIGIR 2019.
[Official tensorflow]: https://github.com/xiangwang1223/neural_graph_collaborative_filtering
[PyTorch reference]: https://github.com/huangtinglin/NGCF-PyTorch
"""
import os
import math
import time
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import PairwiseGenerator
class NGCF(BaseModel):
    """Neural Graph Collaborative Filtering (Xiang Wang et al., SIGIR 2019).

    Embeddings are propagated over the normalized user-item bipartite graph;
    each layer applies a linear transform to the aggregated neighborhood plus
    a bi-interaction (element-wise product) term, and the per-layer outputs
    are averaged into the final representation. Trained with a BPR-style
    pairwise loss.
    """

    def __init__(self, dataset, hparams, device):
        super(NGCF, self).__init__()
        self.data_name = dataset.dataname
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items

        self.emb_dim = hparams['emb_dim']
        self.num_layers = hparams['num_layers']
        self.node_dropout = hparams['node_dropout']  # edge-dropout knob (see __dropout)
        self.mess_dropout = hparams['mess_dropout']  # per-layer message dropout rate
        self.split = hparams['split']                # fold the adjacency matrix to save memory
        self.num_folds = hparams['num_folds']
        # NOTE(review): stored but never used in the loss — confirm whether
        # L2 regularization was meant to be applied.
        self.reg = hparams['reg']

        self.Graph = None
        self.data_loader = None
        self.path = hparams['graph_dir']  # cache directory for the normalized adjacency
        # Robustness: also creates missing parents and is race-free,
        # unlike the previous exists()+mkdir() pair.
        os.makedirs(self.path, exist_ok=True)

        self.device = device
        self.build_graph()
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def build_graph(self):
        """Create the embedding tables and the per-layer transform weights."""
        self.user_embedding = nn.Embedding(self.num_users, self.emb_dim)
        self.item_embedding = nn.Embedding(self.num_items, self.emb_dim)
        nn.init.normal_(self.user_embedding.weight, 0, 0.01)
        nn.init.normal_(self.item_embedding.weight, 0, 0.01)

        # W_gc/b_gc transform the aggregated messages; W_bi/b_bi transform
        # the bi-interaction (element-wise product) term — one pair per layer.
        self.weight_dict = nn.ParameterDict()
        layers = [self.emb_dim] * (self.num_layers + 1)
        for k in range(len(layers) - 1):
            self.weight_dict.update({'W_gc_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(layers[k], layers[k + 1])))})
            self.weight_dict.update({'b_gc_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(1, layers[k + 1])))})
            self.weight_dict.update({'W_bi_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(layers[k], layers[k + 1])))})
            self.weight_dict.update({'b_bi_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(1, layers[k + 1])))})

        self.to(self.device)

    def update_ngcf_embedding(self):
        # Recompute and cache the propagated embeddings for scoring.
        self.user_embeddings, self.item_embeddings = self._ngcf_embedding(self.Graph)

    def forward(self, user_ids, item_ids):
        """Dot-product score for each (user, item) pair, using the cached
        propagated embeddings."""
        user_emb = F.embedding(user_ids, self.user_embeddings)
        item_emb = F.embedding(item_ids, self.item_embeddings)
        return torch.sum(torch.mul(user_emb, item_emb), 1)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with a BPR-style pairwise loss.

        Returns:
            {'scores': best scores seen} — None when no evaluation ever ran.
        """
        train_matrix = dataset.train_data
        self.Graph = self.getSparseGraph(train_matrix)
        batch_generator = PairwiseGenerator(
            train_matrix, num_negatives=1, num_positives_per_user=1,
            batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        num_batches = len(batch_generator)
        # BUGFIX: `scores` was read after the loop even when no evaluator
        # ever ran (evaluator=None), raising NameError; initialize it here.
        scores = None
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, (batch_users, batch_pos, batch_neg) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                batch_loss = self.process_one_batch(batch_users, batch_pos, batch_neg)
                batch_loss.backward()
                self.optimizer.step()
                # BUGFIX: accumulate a float, not the tensor — keeping the
                # tensor would retain every batch's autograd graph in memory.
                epoch_loss += batch_loss.item()
                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}
            # Evaluate periodically once the warm-up (`test_from`) has passed.
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def process_one_batch(self, users, pos_items, neg_items):
        """Return the BPR loss for one batch of (user, pos, neg) triples."""
        self.update_ngcf_embedding()
        pos_scores = self.forward(users, pos_items)
        neg_scores = self.forward(users, neg_items)
        # BUGFIX/stability: F.sigmoid is deprecated, and sigmoid(x).log()
        # underflows to -inf for large negative x; logsigmoid is the stable
        # equivalent of log(sigmoid(pos - neg)).
        return -F.logsigmoid(pos_scores - neg_scores).mean()

    def predict_batch_users(self, user_ids):
        """Score every item for the given batch of users (dense matmul)."""
        user_embeddings = F.embedding(user_ids, self.user_embeddings)
        return user_embeddings @ self.item_embeddings.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Return a dense score matrix for `eval_users`; items already
        present in `eval_pos` are masked out with -inf."""
        self.update_ngcf_embedding()
        pred_matrix = np.zeros(eval_pos.shape)
        with torch.no_grad():
            for start in range(0, len(eval_users), test_batch_size):
                batch_users = eval_users[start:start + test_batch_size]
                user_tensor = torch.LongTensor(batch_users).to(self.device)
                pred_matrix[batch_users] = self.predict_batch_users(user_tensor).detach().cpu().numpy()
        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix

    ##################################### Graph propagation helpers

    def __dropout_x(self, x, keep_prob):
        # Edge dropout on one sparse COO tensor: each entry survives with
        # probability keep_prob; survivors are rescaled by 1/keep_prob so
        # the matrix is unbiased in expectation.
        size = x.size()
        index = x.indices().t()
        values = x.values()
        # BUGFIX: draw the mask on the same device as the values so the
        # boolean indexing below also works for CUDA graphs.
        random_index = torch.rand(len(values), device=values.device) + keep_prob
        random_index = random_index.int().bool()
        index = index[random_index]
        values = values[random_index] / keep_prob
        return torch.sparse_coo_tensor(index.t(), values, size)

    def __dropout(self, graph, keep_prob):
        # BUGFIX: was declared as __dropout(self, keep_prob) while the only
        # caller (_ngcf_embedding) passes (graph, keep_prob), raising
        # TypeError whenever node dropout was active.
        if self.split:
            return [self.__dropout_x(g, keep_prob) for g in graph]
        return self.__dropout_x(graph, keep_prob)

    def _ngcf_embedding(self, graph):
        """Run `num_layers` NGCF propagation layers and return the
        layer-averaged (user, item) embeddings."""
        users_emb = self.user_embedding.weight
        items_emb = self.item_embedding.weight
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]  # layer-0 embeddings
        if self.node_dropout > 0 and self.training:
            g_droped = self.__dropout(graph, self.node_dropout)
        else:
            g_droped = graph
        ego_emb = all_emb
        for k in range(self.num_layers):
            if self.split:
                # Folded graph: multiply each row-fold separately, re-assemble.
                side_emb = torch.cat(
                    [torch.sparse.mm(fold, ego_emb) for fold in g_droped], dim=0)
            else:
                side_emb = torch.sparse.mm(g_droped, ego_emb)
            # Message transform plus bi-interaction term.
            sum_emb = torch.matmul(side_emb, self.weight_dict['W_gc_%d' % k]) + self.weight_dict['b_gc_%d' % k]
            bi_emb = torch.mul(ego_emb, side_emb)
            bi_emb = torch.matmul(bi_emb, self.weight_dict['W_bi_%d' % k]) + self.weight_dict['b_bi_%d' % k]
            ego_emb = F.leaky_relu(sum_emb + bi_emb, negative_slope=0.2)
            ego_emb = F.dropout(ego_emb, self.mess_dropout, training=self.training)
            embs += [F.normalize(ego_emb, p=2, dim=1)]
        ngcf_out = torch.mean(torch.stack(embs, dim=1), dim=1)
        users, items = torch.split(ngcf_out, [self.num_users, self.num_items])
        return users, items

    def _split_A_hat(self, A):
        """Split adjacency matrix `A` row-wise into `num_folds` sparse
        tensors on the target device (last fold absorbs the remainder)."""
        total_rows = self.num_users + self.num_items
        fold_len = total_rows // self.num_folds
        folds = []
        for i in range(self.num_folds):
            start = i * fold_len
            end = total_rows if i == self.num_folds - 1 else (i + 1) * fold_len
            fold_tensor = self._convert_sp_mat_to_sp_tensor(A[start:end])
            folds.append(fold_tensor.coalesce().to(self.device))
        return folds

    def _convert_sp_mat_to_sp_tensor(self, X):
        """Convert a scipy sparse matrix to a torch sparse COO float tensor
        (uses torch.sparse_coo_tensor; torch.sparse.FloatTensor is a
        deprecated constructor)."""
        coo = X.tocoo().astype(np.float32)
        indices = torch.from_numpy(np.vstack((coo.row, coo.col))).long()
        values = torch.from_numpy(coo.data)
        return torch.sparse_coo_tensor(indices, values, torch.Size(coo.shape))

    def getSparseGraph(self, rating_matrix):
        """Build (or load from the on-disk cache) the symmetrically
        normalized bipartite adjacency D^{-1/2} A D^{-1/2}."""
        n_users, n_items = rating_matrix.shape
        print("loading adjacency matrix")
        filename = f'{self.data_name}_s_pre_adj_mat.npz'
        try:
            norm_adj = sp.load_npz(os.path.join(self.path, filename))
            print("successfully loaded...")
        # BUGFIX: was a bare `except:` (also swallowed KeyboardInterrupt);
        # regenerate only on a missing or unreadable/corrupt cache file.
        except (OSError, ValueError):
            print("generating adjacency matrix")
            s = time.time()
            # Bipartite adjacency: A = [[0, R], [R^T, 0]].
            adj_mat = sp.dok_matrix((n_users + n_items, n_users + n_items), dtype=np.float32)
            adj_mat = adj_mat.tolil()
            R = rating_matrix.tolil()
            adj_mat[:n_users, n_users:] = R
            adj_mat[n_users:, :n_users] = R.T
            adj_mat = adj_mat.todok()
            # Symmetric normalization; zero-degree nodes get d^{-1/2} = 0.
            rowsum = np.array(adj_mat.sum(axis=1))
            with np.errstate(divide='ignore'):
                d_inv = np.power(rowsum, -0.5).flatten()
            d_inv[np.isinf(d_inv)] = 0.
            d_mat = sp.diags(d_inv)
            norm_adj = d_mat.dot(adj_mat).dot(d_mat).tocsr()
            end = time.time()
            print(f"costing {end-s}s, saved norm_mat...")
            sp.save_npz(os.path.join(self.path, filename), norm_adj)
        if self.split == True:
            Graph = self._split_A_hat(norm_adj)
            print("done split matrix")
        else:
            Graph = self._convert_sp_mat_to_sp_tensor(norm_adj)
            Graph = Graph.coalesce().to(self.device)
            print("don't split the matrix")
        return Graph
# # Non-neural
from models.ItemKNN import ItemKNN
from models.PureSVD import PureSVD
from models.SLIMElastic import SLIM
from models.P3a import P3a
from models.RP3b import RP3b
from models.EASE import EASE
# # Neural
from models.DAE import DAE
from models.CDAE import CDAE
from models.MF import MF
from models.MultVAE import MultVAE
from models.NGCF import NGCF
from models.LightGCN import LightGCN
# __all__ = ['ItemKNN', 'PureSVD', 'P3a', 'RP3b', 'SLIM', 'EASE', 'DAE', 'CDAE', 'BPRMF', 'MultVAE'] | 500 | 28.470588 | 100 | py |
"""
Steffen Rendle et al., BPR: Bayesian Personalized Ranking from Implicit Feedback. UAI 2009.
https://arxiv.org/pdf/1205.2618
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import PointwiseGenerator, PairwiseGenerator
class MF(BaseModel):
    """Matrix Factorization, trained either pointwise (MSE or BCE against
    sampled labels) or pairwise (BPR; Rendle et al., UAI 2009).
    """

    def __init__(self, dataset, hparams, device):
        super(MF, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.hidden_dim = hparams['hidden_dim']
        self.pointwise = hparams['pointwise']
        # Pointwise objective; ignored in the pairwise (BPR) case.
        self.loss_func = F.mse_loss if hparams['loss_func'] == 'mse' else F.binary_cross_entropy_with_logits
        self.user_embedding = nn.Embedding(self.num_users, self.hidden_dim)
        self.item_embedding = nn.Embedding(self.num_items, self.hidden_dim)
        self.device = device
        self.to(device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def embeddings(self, user_ids, item_ids):
        """Look up the user and item latent vectors."""
        user_emb = self.user_embedding(user_ids)
        item_emb = self.item_embedding(item_ids)
        return user_emb, item_emb

    def forward(self, user_ids, item_ids):
        """Dot-product score for each (user, item) pair."""
        user_emb, item_emb = self.embeddings(user_ids, item_ids)
        return torch.sum(torch.mul(user_emb, item_emb), 1)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train the model with the configured objective.

        Returns:
            {'scores': best scores seen} — None when no evaluation ever ran.
        """
        train_matrix = dataset.train_data
        if self.pointwise:
            batch_generator = PointwiseGenerator(
                train_matrix, return_rating=True, num_negatives=1,
                batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        else:
            batch_generator = PairwiseGenerator(
                train_matrix, num_negatives=1, num_positives_per_user=1,
                batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        num_batches = len(batch_generator)
        # BUGFIX: `scores` was read after the loop even when no evaluator
        # ever ran (evaluator=None), raising NameError; initialize it here.
        scores = None
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            # Third batch element: target rating (pointwise) or negative
            # item ids (pairwise).
            for b, (batch_users, batch_pos, batch_third) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                batch_loss = self.process_one_batch(batch_users, batch_pos, batch_third)
                batch_loss.backward()
                self.optimizer.step()
                # BUGFIX: accumulate a float, not the tensor — keeping the
                # tensor would retain every batch's autograd graph in memory.
                epoch_loss += batch_loss.item()
                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}
            # Evaluate periodically once the warm-up (`test_from`) has passed.
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def process_one_batch(self, users, items, ratings):
        """One-batch loss. In pointwise mode `ratings` are the targets; in
        pairwise mode they are the sampled negative item ids."""
        pos_ratings = self.forward(users, items)
        if self.pointwise:
            loss = self.loss_func(pos_ratings, ratings)
        else:
            neg_ratings = self.forward(users, ratings)
            # BUGFIX/stability: F.sigmoid is deprecated, and sigmoid(x).log()
            # underflows to -inf for large negative x; logsigmoid is the
            # stable equivalent of log(sigmoid(pos - neg)).
            loss = -F.logsigmoid(pos_ratings - neg_ratings).mean()
        return loss

    def predict_batch_users(self, user_ids):
        """Score every item for the given batch of users (dense matmul)."""
        user_latent = self.user_embedding(user_ids)
        all_item_latent = self.item_embedding.weight.data
        return user_latent @ all_item_latent.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Return a dense score matrix for `eval_users`; items already
        present in `eval_pos` are masked out with -inf."""
        pred_matrix = np.zeros(eval_pos.shape)
        with torch.no_grad():
            for start in range(0, len(eval_users), test_batch_size):
                batch_users = eval_users[start:start + test_batch_size]
                user_tensor = torch.LongTensor(batch_users).to(self.device)
                pred_matrix[batch_users] = self.predict_batch_users(user_tensor).detach().cpu().numpy()
        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix
| 5,277 | 38.38806 | 109 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.